msi.c revision ded86d8d37736df67ddeec4ae00e2ec1a5a90b3c
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static struct kmem_cache* msi_cachep;

static int pci_msi_enable = 1;

static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

static void msi_set_mask_bit(unsigned int irq, int flag)
{
	struct msi_desc *entry;

	entry = msi_desc[irq];
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(1);
			mask_bits |= flag;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_data(irq);
	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_data(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
}
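
/*
 * mask_msi_irq()/unmask_msi_irq() are meant to be wired up as irq_chip
 * callbacks by the arch code behind arch_setup_msi_irq().  A minimal,
 * illustrative sketch (the chip name and any surrounding fields are
 * assumptions, not part of this file):
 *
 *	static struct irq_chip example_msi_chip = {
 *		.name	= "PCI-MSI",
 *		.mask	= mask_msi_irq,
 *		.unmask	= unmask_msi_irq,
 *	};
 */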

static int msi_free_irq(struct pci_dev* dev, int irq);

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	return status;
}

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

static void attach_msi_entry(struct msi_desc *entry, int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[irq] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}

static int create_msi_irq(void)
{
	struct msi_desc *entry;
	int irq;

	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	irq = create_irq();
	if (irq < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}

	set_irq_data(irq, entry);

	return irq;
}

static void destroy_msi_irq(unsigned int irq)
{
	struct msi_desc *entry;

	entry = get_irq_data(irq);
	set_irq_chip(irq, NULL);
	set_irq_data(irq, NULL);
	destroy_irq(irq);
	kmem_cache_free(msi_cachep, entry);
}

static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 1;
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 1;
	}

	pci_intx(dev, 0);  /* disable intx */
}

void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Clear the MSI enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 0;
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 0;
	}

	pci_intx(dev, 1);  /* enable intx */
}

#ifdef CONFIG_PM
static int __pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

static int __pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int irq, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	if (!dev->msix_enabled)
		return 0;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	irq = head = dev->first_msi_irq;
	while (head != tail) {
		struct msi_desc *entry;

		entry = msi_desc[irq];
		read_msi_msg(irq, &entry->msg_save);

		tail = msi_desc[irq]->link.tail;
		irq = tail;
	}

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

int pci_save_msi_state(struct pci_dev *dev)
{
	int rc;

	rc = __pci_save_msi_state(dev);
	if (rc)
		return rc;

	rc = __pci_save_msix_state(dev);

	return rc;
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int irq, head, tail = 0;
	struct msi_desc *entry;
	struct pci_cap_saved_state *save_state;

	if (!dev->msix_enabled)
		return;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* restore the table */
	irq = head = dev->first_msi_irq;
	while (head != tail) {
		entry = msi_desc[irq];
		write_msi_msg(irq, &entry->msg_save);

		tail = msi_desc[irq]->link.tail;
		irq = tail;
	}

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
#endif	/* CONFIG_PM */

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of handling
 * multiple messages. A return of zero indicates successful setup of entry
 * zero with the new MSI irq; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int status;
	struct msi_desc *entry;
	int pos, irq;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	irq = create_msi_irq();
	if (irq < 0)
		return irq;

	entry = get_irq_data(irq);
	entry->link.head = irq;
	entry->link.tail = irq;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
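		/*
		 * Worked example (a sketch, assuming multi_msi_capable()
		 * returns the number of messages the function advertises):
		 * for a device capable of 4 messages, temp = 1 << 4 = 0x10,
		 * so (temp - 1) & ~temp = 0x0f, i.e. mask bits 0-3 cover
		 * every vector the device could generate.
		 */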
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	/* Configure MSI capability structure */
	status = arch_setup_msi_irq(irq, dev);
	if (status < 0) {
		destroy_msi_irq(irq);
		return status;
	}

	dev->first_msi_irq = irq;
	attach_msi_entry(entry, irq);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	dev->irq = irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated irqs; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	int status;
	int irq, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		irq = create_msi_irq();
		if (irq < 0)
			break;

		entry = get_irq_data(irq);
		j = entries[i].entry;
		entries[i].vector = irq;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = irq;
			entry->link.tail = irq;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = irq;
			head->link.head = irq;
		}
		temp = irq;
		tail = entry;
		/* Configure MSI-X capability structure */
		status = arch_setup_msi_irq(irq, dev);
		if (status < 0) {
			destroy_msi_irq(irq);
			break;
		}

		attach_msi_entry(entry, irq);
	}
	if (i != nvec) {
		int avail = i - 1;
		i--;
		for (; i >= 0; i--) {
			irq = (entries + i)->vector;
			msi_free_irq(dev, irq);
			(entries + i)->vector = 0;
		}
		/* If we had some success, report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail <= 0)
			avail = -EBUSY;
		return avail;
	}
	dev->first_msi_irq = entries[0].vector;
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}

/**
 * pci_msi_supported - check whether MSI may be enabled on device
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Look at global flags, the device itself, and its parent buses
 * to return 0 if MSI is supported for the device.
 **/
static
int pci_msi_supported(struct pci_dev * dev)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to set NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	return 0;
}
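
/*
 * A bridge that cannot route MSI would typically be handled by a quirk.
 * A minimal, hypothetical sketch (the vendor ID and quirk name are
 * illustrative only, not part of this file):
 *
 *	static void __devinit quirk_no_msi_bridge(struct pci_dev *dev)
 *	{
 *		if (dev->subordinate)
 *			dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EXAMPLE, PCI_ANY_ID,
 *				quirk_no_msi_bridge);
 */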

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with
 * a single MSI irq when its driver requests MSI mode to be enabled on
 * the hardware device function. A return of zero indicates successful
 * setup of entry zero with the new MSI irq; a non-zero return indicates
 * failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, status;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether the driver already requested MSI-X irqs */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && dev->msix_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
		       "Device already has MSI-X enabled\n",
		       pci_name(dev));
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
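
/*
 * Typical driver usage, shown as a hedged sketch (the "mydev" names are
 * illustrative, not part of this file):
 *
 *	if (pci_enable_msi(pdev) == 0) {
 *		err = request_irq(pdev->irq, mydev_interrupt, 0,
 *				  "mydev", mydev);
 *		...
 *	}
 *
 * On teardown the driver must call free_irq(pdev->irq, mydev) before
 * pci_disable_msi(pdev), after which dev->irq reverts to the default
 * pin-assertion irq.
 */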

void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_irq;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	if (!dev->msi_enabled)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->first_msi_irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (irq_has_action(dev->first_msi_irq)) {
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI irq %d\n",
		       pci_name(dev), dev->first_msi_irq);
		BUG_ON(irq_has_action(dev->first_msi_irq));
	} else {
		default_irq = entry->msi_attrib.default_irq;
		spin_unlock_irqrestore(&msi_lock, flags);
		msi_free_irq(dev, dev->first_msi_irq);

		/* Restore dev->irq to its default pin-assertion irq */
		dev->irq = default_irq;
	}
	dev->first_msi_irq = 0;
}

static int msi_free_irq(struct pci_dev* dev, int irq)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	arch_teardown_msi_irq(irq);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[irq];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	msi_desc[irq] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	destroy_msi_irq(irq);

	if (type == PCI_CAP_ID_MSIX) {
		writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == irq)
			iounmap(base);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested irqs when its driver requests MSI-X mode to be
 * enabled on the hardware device function. A return of zero indicates
 * successful configuration of the MSI-X capability structure with newly
 * allocated MSI-X irqs. A return of < 0 indicates a failure, while a
 * return of > 0 indicates that the request exceeds the number of irqs
 * available; the driver should use the returned value to retry with a
 * smaller request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether the driver already requested an MSI irq */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		dev->msi_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
		       "Device already has an MSI irq assigned\n",
		       pci_name(dev));
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
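
/*
 * Typical driver usage, shown as a hedged sketch (the vector count and
 * names are illustrative, not part of this file):
 *
 *	struct msix_entry entries[4];
 *	int i, err;
 *
 *	for (i = 0; i < 4; i++)
 *		entries[i].entry = i;
 *	err = pci_enable_msix(pdev, entries, 4);
 *	if (err > 0)
 *		err = pci_enable_msix(pdev, entries, err);
 *	if (!err)
 *		... request_irq() on each entries[i].vector ...
 *
 * A positive return value reports how many irqs are available, so the
 * driver may retry with that smaller count as shown above.
 */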

void pci_disable_msix(struct pci_dev* dev)
{
	int irq, head, tail = 0, warning = 0;
	unsigned long flags;
	int pos;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	if (!dev->msix_enabled)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	irq = head = dev->first_msi_irq;
	while (head != tail) {
		spin_lock_irqsave(&msi_lock, flags);
		tail = msi_desc[irq]->link.tail;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (irq_has_action(irq))
			warning = 1;
		else if (irq != head)	/* Release MSI-X irq */
			msi_free_irq(dev, irq);
		irq = tail;
	}
	msi_free_irq(dev, irq);
	if (warning) {
		printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			"free_irq() on all MSI-X irqs\n",
			pci_name(dev));
		BUG_ON(warning > 0);
	}
	dev->first_msi_irq = 0;
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal of the device function. All MSI/MSI-X
 * irqs previously assigned to this device function are reclaimed to the
 * unused state and may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int pos;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && dev->msi_enabled) {
		if (irq_has_action(dev->first_msi_irq)) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI irq %d\n",
			       pci_name(dev), dev->first_msi_irq);
			BUG_ON(irq_has_action(dev->first_msi_irq));
		} else /* Release MSI irq assigned to this device */
			msi_free_irq(dev, dev->first_msi_irq);
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && dev->msix_enabled) {
		int irq, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		irq = head = dev->first_msi_irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			tail = msi_desc[irq]->link.tail;
			base = msi_desc[irq]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (irq_has_action(irq))
				warning = 1;
			else if (irq != head) /* Release MSI-X irq */
				msi_free_irq(dev, irq);
			irq = tail;
		}
		msi_free_irq(dev, irq);
		if (warning) {
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X irqs\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
	}
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);