msi.c revision 58e0543e8f355b32f0778a18858b255adb7402ae
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static struct kmem_cache* msi_cachep;

static int pci_msi_enable = 1;

static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

static void msi_set_mask_bit(unsigned int irq, int flag)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(1);
			mask_bits |= flag;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			msi_set_enable(entry->dev, !flag);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
}

static int msi_free_irq(struct pci_dev* dev, int irq);

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	return status;
}

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

#ifdef CONFIG_PM
static int __pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!dev->msi_enabled)
		return 0;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!dev->msi_enabled)
		return;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	pci_intx(dev, 0);		/* disable intx */
	control = cap[i++] >> 16;
	msi_set_enable(dev, 0);
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

static int __pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int irq, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	if (!dev->msix_enabled)
		return 0;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	irq = head = dev->first_msi_irq;
	while (head != tail) {
		struct msi_desc *entry;

		entry = get_irq_msi(irq);
		read_msi_msg(irq, &entry->msg_save);

		tail = entry->link.tail;
		irq = tail;
	}

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

int pci_save_msi_state(struct pci_dev *dev)
{
	int rc;

	rc = __pci_save_msi_state(dev);
	if (rc)
		return rc;

	rc = __pci_save_msix_state(dev);

	return rc;
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int irq, head, tail = 0;
	struct msi_desc *entry;
	struct pci_cap_saved_state *save_state;

	if (!dev->msix_enabled)
		return;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	pci_intx(dev, 0);		/* disable intx */
	msix_set_enable(dev, 0);
	irq = head = dev->first_msi_irq;
	while (head != tail) {
		entry = get_irq_msi(irq);
		write_msi_msg(irq, &entry->msg_save);

		tail = entry->link.tail;
		irq = tail;
	}

	pci_write_config_word(dev, msi_control_reg(pos), save);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
#endif	/* CONFIG_PM */

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of handling
 * multiple messages. A return of zero indicates that entry zero was
 * successfully set up with the new MSI irq; a non-zero return indicates
 * failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, irq;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	/* Configure MSI capability structure */
	irq = arch_setup_msi_irq(dev, entry);
	if (irq < 0) {
		kmem_cache_free(msi_cachep, entry);
		return irq;
	}
	entry->link.head = irq;
	entry->link.tail = irq;
	dev->first_msi_irq = irq;
	set_irq_msi(irq, entry);

	/* Set MSI enabled bits	 */
	pci_intx(dev, 0);		/* disable intx */
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = irq;
	return 0;
}

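/*
 * Worked example (not part of the original file) of the mask computation in
 * msi_capability_init() above, assuming multi_msi_capable() decodes the
 * Multiple Message Capable field into a message count: for a device
 * advertising four messages, temp = 1 << 4 = 0x10 and
 * ((temp - 1) & ~temp) = 0x0f, i.e. the mask bits for vectors 0-3 get set.
 * The count below is a hypothetical value, not read from hardware.
 */
#if 0	/* illustration only, never compiled */
static void msi_maskbits_example(void)
{
	unsigned int count = 4;			/* hypothetical message count */
	unsigned int temp = 1 << count;		/* 0x10 */
	unsigned int mask = (temp - 1) & ~temp;	/* 0x0f: mask vectors 0-3 */

	printk(KERN_DEBUG "msi example: mask bits %#x\n", mask);
}
#endif
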
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested MSI-X entries. A return of zero indicates that all requested
 * entries were set up with allocated irqs; a non-zero return indicates
 * failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	int irq, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		/* Configure MSI-X capability structure */
		irq = arch_setup_msi_irq(dev, entry);
		if (irq < 0) {
			kmem_cache_free(msi_cachep, entry);
			break;
		}
		entries[i].vector = irq;
		if (!head) {
			entry->link.head = irq;
			entry->link.tail = irq;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = irq;
			head->link.head = irq;
		}
		temp = irq;
		tail = entry;

		set_irq_msi(irq, entry);
	}
	if (i != nvec) {
		int avail = i - 1;
		i--;
		for (; i >= 0; i--) {
			irq = (entries + i)->vector;
			msi_free_irq(dev, irq);
			(entries + i)->vector = 0;
		}
		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail <= 0)
			avail = -EBUSY;
		return avail;
	}
	dev->first_msi_irq = entries[0].vector;
	/* Set MSI-X enabled bits */
	pci_intx(dev, 0);		/* disable intx */
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

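/*
 * Worked example (not part of the original file) of the table mapping done
 * in msix_capability_init() above, assuming PCI_MSIX_FLAGS_BIRMASK covers
 * the low-order BAR indicator bits: with a hypothetical table offset
 * register of 0x00002003, the BIR is 3 and the offset is 0x2000, so the
 * table starts at pci_resource_start(dev, 3) + 0x2000 and spans
 * nr_entries * PCI_MSIX_ENTRY_SIZE bytes.
 */
#if 0	/* illustration only, never compiled */
static void msix_table_example(struct pci_dev *dev, int nr_entries)
{
	u32 table_offset = 0x00002003;	/* hypothetical register value */
	u8 bir = table_offset & PCI_MSIX_FLAGS_BIRMASK;	/* 3 */
	unsigned long phys = pci_resource_start(dev, bir) +
				(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
	void __iomem *base = ioremap_nocache(phys,
				nr_entries * PCI_MSIX_ENTRY_SIZE);

	if (base)
		iounmap(base);
}
#endif
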
/**
 * pci_msi_supported - check whether MSI may be enabled on device
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Look at global flags, the device itself, and its parent busses
 * to return 0 if MSI is supported for the device.
 **/
static
int pci_msi_supported(struct pci_dev * dev)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to set NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	return 0;
}

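/*
 * Minimal sketch (not part of the original file) of the kind of quirk the
 * comment in pci_msi_supported() refers to: a bridge known not to forward
 * MSI writes from its secondary bus marks that bus with
 * PCI_BUS_FLAGS_NO_MSI.  The vendor/device IDs below are placeholders, not
 * a real erratum list.
 */
#if 0	/* illustration only, never compiled */
static void __devinit quirk_no_msi_example(struct pci_dev *bridge)
{
	if (bridge->subordinate)
		bridge->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_no_msi_example);
#endif
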
/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI irq when the device driver requests MSI mode to be enabled
 * on its hardware device function. A return of zero indicates that entry
 * zero was successfully set up with the new MSI irq; a non-zero return
 * indicates failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, status;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
			"Device already has MSI-X enabled\n",
			pci_name(dev));
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}

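/*
 * Minimal usage sketch (not part of the original file): a hypothetical
 * driver tries MSI first and falls back to the pin-based irq when
 * pci_enable_msi() fails.  my_handler() and my_driver_setup_irq() are
 * placeholder names, not kernel APIs.  Teardown is the mirror image:
 * free_irq() followed by pci_disable_msi().
 */
#if 0	/* illustration only, never compiled */
static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_driver_setup_irq(struct pci_dev *pdev, void *ctx)
{
	unsigned long flags = IRQF_SHARED;	/* legacy INTx may be shared */

	if (!pci_enable_msi(pdev))
		flags = 0;			/* MSI irq is exclusive */
	else
		printk(KERN_INFO "PCI: %s: using INTx\n", pci_name(pdev));

	/* On success pci_enable_msi() updated pdev->irq to the MSI irq */
	return request_irq(pdev->irq, my_handler, flags, "my_driver", ctx);
}
#endif
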
629{
630	struct msi_desc *entry;
631	int default_irq;
632
633	if (!pci_msi_enable)
634		return;
635	if (!dev)
636		return;
637
638	if (!dev->msi_enabled)
639		return;
640
641	msi_set_enable(dev, 0);
642	pci_intx(dev, 1);		/* enable intx */
643	dev->msi_enabled = 0;
644
645	entry = get_irq_msi(dev->first_msi_irq);
646	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
647		return;
648	}
649	if (irq_has_action(dev->first_msi_irq)) {
650		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
651		       "free_irq() on MSI irq %d\n",
652		       pci_name(dev), dev->first_msi_irq);
653		BUG_ON(irq_has_action(dev->first_msi_irq));
654	} else {
655		default_irq = entry->msi_attrib.default_irq;
656		msi_free_irq(dev, dev->first_msi_irq);
657
658		/* Restore dev->irq to its default pin-assertion irq */
659		dev->irq = default_irq;
660	}
661	dev->first_msi_irq = 0;
662}
663
664static int msi_free_irq(struct pci_dev* dev, int irq)
665{
666	struct msi_desc *entry;
667	int head, entry_nr, type;
668	void __iomem *base;
669
670	entry = get_irq_msi(irq);
671	if (!entry || entry->dev != dev) {
672		return -EINVAL;
673	}
674	type = entry->msi_attrib.type;
675	entry_nr = entry->msi_attrib.entry_nr;
676	head = entry->link.head;
677	base = entry->mask_base;
678	get_irq_msi(entry->link.head)->link.tail = entry->link.tail;
679	get_irq_msi(entry->link.tail)->link.head = entry->link.head;
680
681	arch_teardown_msi_irq(irq);
682	kmem_cache_free(msi_cachep, entry);
683
684	if (type == PCI_CAP_ID_MSIX) {
685		writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
686			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
687
688		if (head == irq)
689			iounmap(base);
690	}
691
692	return 0;
693}
694
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested irqs when the device driver requests MSI-X mode to
 * be enabled on its hardware device function. A return of zero indicates
 * that the MSI-X capability structure was successfully configured with the
 * newly allocated MSI-X irqs. A return of < 0 indicates a failure, while
 * a return of > 0 indicates that the request exceeds the number of irqs
 * available; the driver should retry with the returned value as @nvec.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested an MSI irq */
	if (dev->msi_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
		       "Device already has an MSI irq assigned\n",
		       pci_name(dev));
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}

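/*
 * Minimal usage sketch (not part of the original file): a hypothetical
 * driver asks for four MSI-X vectors and, if fewer are available, retries
 * with the count reported by a positive return value as described above.
 * my_driver_enable_msix() is a placeholder name.
 */
#if 0	/* illustration only, never compiled */
static int my_driver_enable_msix(struct pci_dev *pdev)
{
	struct msix_entry entries[4];
	int i, err;

	for (i = 0; i < 4; i++)
		entries[i].entry = i;		/* MSI-X table slots 0..3 */

	err = pci_enable_msix(pdev, entries, 4);
	if (err > 0)				/* only err vectors available */
		err = pci_enable_msix(pdev, entries, err);
	if (err)
		return err;			/* fall back to MSI or INTx */

	/* entries[i].vector now holds the irq for each requested slot */
	return 0;
}
#endif
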
void pci_disable_msix(struct pci_dev* dev)
{
	int irq, head, tail = 0, warning = 0;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	if (!dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx(dev, 1);		/* enable intx */
	dev->msix_enabled = 0;

	irq = head = dev->first_msi_irq;
	while (head != tail) {
		tail = get_irq_msi(irq)->link.tail;
		if (irq_has_action(irq))
			warning = 1;
		else if (irq != head)	/* Release MSI-X irq */
			msi_free_irq(dev, irq);
		irq = tail;
	}
	msi_free_irq(dev, irq);
	if (warning) {
		printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			"free_irq() on all MSI-X irqs\n",
			pci_name(dev));
		BUG_ON(warning > 0);
	}
	dev->first_msi_irq = 0;
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal of the device function. All MSI/MSI-X
 * irqs previously assigned to this device function are reclaimed to an
 * unused state, so they may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled) {
		if (irq_has_action(dev->first_msi_irq)) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI irq %d\n",
			       pci_name(dev), dev->first_msi_irq);
			BUG_ON(irq_has_action(dev->first_msi_irq));
		} else /* Release MSI irq assigned to this device */
			msi_free_irq(dev, dev->first_msi_irq);
	}
	if (dev->msix_enabled) {
		int irq, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		irq = head = dev->first_msi_irq;
		while (head != tail) {
			tail = get_irq_msi(irq)->link.tail;
			base = get_irq_msi(irq)->mask_base;
			if (irq_has_action(irq))
				warning = 1;
			else if (irq != head) /* Release MSI-X irq */
				msi_free_irq(dev, irq);
			irq = tail;
		}
		msi_free_irq(dev, irq);
		if (warning) {
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X irqs\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
	}
}

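/*
 * Illustration (not part of the original file) of the teardown ordering the
 * checks above enforce: the driver must free_irq() its MSI/MSI-X irqs and
 * disable MSI itself before the hot-remove path reclaims the vectors.
 * my_remove() is a placeholder name for a driver's remove callback.
 */
#if 0	/* illustration only, never compiled */
static void my_remove(struct pci_dev *pdev, void *ctx)
{
	free_irq(pdev->irq, ctx);	/* release the handler first */
	pci_disable_msi(pdev);		/* then return the MSI vector */
}
#endif
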
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);