msi.c revision 24334a12533e9ac70dcb467ccd629f190afc5361
1/*
2 * File:	msi.c
3 * Purpose:	PCI Message Signaled Interrupt (MSI)
4 *
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */
8
9#include <linux/mm.h>
10#include <linux/irq.h>
11#include <linux/interrupt.h>
12#include <linux/init.h>
13#include <linux/ioport.h>
14#include <linux/smp_lock.h>
15#include <linux/pci.h>
16#include <linux/proc_fs.h>
17
18#include <asm/errno.h>
19#include <asm/io.h>
20#include <asm/smp.h>
21
22#include "pci.h"
23#include "msi.h"
24
/* Protects msi_desc[], the vector_irq[] bookkeeping and the counters below. */
static DEFINE_SPINLOCK(msi_lock);
/* Per-vector MSI descriptor; NULL when the vector has no MSI attached. */
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
/* Slab cache backing alloc_msi_entry(). */
static kmem_cache_t* msi_cachep;

static int pci_msi_enable = 1;	/* cleared when MSI is globally disabled */
static int last_alloc_vector;	/* most recent vector from assign_irq_vector() */
static int nr_released_vectors;	/* vectors freed (vector_irq[] == 0), reusable */
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;	/* held back for hotplug */
static int nr_msix_devices;	/* devices seen with an MSI-X capability */

#ifndef CONFIG_X86_IO_APIC
/* Without IO-APIC support the table is defined here; -1 marks a vector
 * with no 1:1 vector-to-IOxAPIC IRQ mapping (see assign_msi_vector()). */
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
#endif

/* Arch MSI hooks (setup/teardown/target), registered via msi_register(). */
static struct msi_ops *msi_ops;
40
41int
42msi_register(struct msi_ops *ops)
43{
44	msi_ops = ops;
45	return 0;
46}
47
48static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
49{
50	memset(p, 0, sizeof(struct msi_desc));
51}
52
53static int msi_cache_init(void)
54{
55	msi_cachep = kmem_cache_create("msi_cache",
56			sizeof(struct msi_desc),
57		       	0, SLAB_HWCACHE_ALIGN, msi_cache_ctor, NULL);
58	if (!msi_cachep)
59		return -ENOMEM;
60
61	return 0;
62}
63
/*
 * Mask (flag=1) or unmask (flag=0) the interrupt behind @vector.
 * For MSI the mask bit lives in a config-space dword and mask_base
 * encodes that config offset; for MSI-X it is the Vector Control word
 * of the entry in the memory-mapped table (mask_base is the ioremapped
 * table base).  Silently does nothing if the vector has no maskable
 * descriptor.
 */
static void msi_set_mask_bit(unsigned int vector, int flag)
{
	struct msi_desc *entry;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || !entry->mask_base)
		return;
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int		pos;
		u32		mask_bits;

		/* mask_base holds a config-space offset, not a pointer */
		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~(1);	/* clear message 0's mask bit ... */
		mask_bits |= flag;	/* ... then set it when masking */
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		/* Vector Control word of this entry in the MSI-X table */
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}
95
#ifdef CONFIG_SMP
/*
 * Retarget @vector at the first CPU of @cpu_mask by rewriting the MSI
 * message address: read the current address words, let the arch
 * msi_ops->target() hook adjust them for the destination CPU, then
 * write them back (config space for MSI, memory-mapped table for
 * MSI-X).
 */
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	u32 address_hi, address_lo;
	unsigned int irq = vector;
	unsigned int dest_cpu = first_cpu(cpu_mask);

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev)
		return;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);

		if (!pos)
			return;

		/* read-modify-write the message address in config space */
		pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
			&address_hi);
		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
			&address_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
			address_hi);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
			address_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		/* address words of this entry in the MSI-X table */
		int offset_hi =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
		int offset_lo =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		address_hi = readl(entry->mask_base + offset_hi);
		address_lo = readl(entry->mask_base + offset_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		writel(address_hi, entry->mask_base + offset_hi);
		writel(address_lo, entry->mask_base + offset_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	default:
		break;
	}
}
#else
#define set_msi_affinity NULL
#endif /* CONFIG_SMP */
156
/* irq-chip hook: set the mask bit for @vector. */
static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}
161
/* irq-chip hook: clear the mask bit for @vector. */
static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}
166
/*
 * irq-chip startup for MSI without a per-vector mask bit: mark the
 * vector active under msi_lock.  Always returns 0 (nothing pending).
 */
static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;	/* never anything pending */
}
183
/*
 * irq-chip startup for maskable MSI/MSI-X: mark active, then unmask
 * the vector.  Always returns 0 (nothing pending).
 */
static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	startup_msi_irq_wo_maskbit(vector);
	unmask_MSI_irq(vector);
	return 0;	/* never anything pending */
}
190
191static void shutdown_msi_irq(unsigned int vector)
192{
193	struct msi_desc *entry;
194	unsigned long flags;
195
196	spin_lock_irqsave(&msi_lock, flags);
197	entry = msi_desc[vector];
198	if (entry && entry->dev)
199		entry->msi_attrib.state = 0;	/* Mark it not active */
200	spin_unlock_irqrestore(&msi_lock, flags);
201}
202
/* irq-chip end hook (no mask bit): handle pending migration, ack APIC. */
static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	ack_APIC_irq();
}
208
/*
 * irq-chip end hook (maskable): handle pending migration, re-enable
 * the vector that .ack masked, then ack the APIC.
 */
static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	unmask_MSI_irq(vector);
	ack_APIC_irq();
}
215
/* No-op enable/disable/ack hook for MSI without a mask bit. */
static void do_nothing(unsigned int vector)
{
}
219
220/*
221 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
222 * which implement the MSI-X Capability Structure.
223 */
static struct hw_interrupt_type msix_irq_type = {
	.typename	= "PCI-MSI-X",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,		/* mask while handling; .end unmasks */
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};
234
235/*
236 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
237 * which implement the MSI Capability Structure with
238 * Mask-and-Pending Bits.
239 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,		/* mask while handling; .end unmasks */
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};
250
251/*
252 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
253 * which implement the MSI Capability Structure without
254 * Mask-and-Pending Bits.
255 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_wo_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= do_nothing,		/* no mask bit to manipulate */
	.disable	= do_nothing,
	.ack		= do_nothing,
	.end		= end_msi_irq_wo_maskbit,
	.set_affinity	= set_msi_affinity
};
266
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
/*
 * Hand out a vector for MSI use.  While fresh vectors remain, take one
 * from assign_irq_vector(); once LAST_DEVICE_VECTOR has been handed
 * out, recycle a vector previously released by hotplug removal
 * (vector_irq[] == 0).  Returns the vector number or -EBUSY.
 */
static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;	/* cleared when fresh pool runs dry */
	int vector;
	unsigned long flags;

	/*
	 * msi_lock is provided to ensure that successful allocation of MSI
	 * vector is assigned unique among drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);

	if (!new_vector_avail) {
		int free_vector = 0;

		/*
	 	 * vector_irq[] = -1 indicates that this specific vector is:
	 	 * - assigned for MSI (since MSI have no associated IRQ) or
	 	 * - assigned for legacy if less than 16, or
	 	 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
	 	 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, is freed by hotplug removed operations.
		 * This vector will be reused for any subsequent hotplug added
		 * operations.
	 	 * vector_irq[] > 0 indicates that this vector is assigned for
		 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
	 	 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			/* prefer a released vector with no stale descriptor */
			if (!msi_desc[vector])
			      	break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		vector_irq[free_vector] = -1;	/* claim it for MSI */
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail;

			/* free all linked vectors before re-assign */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}

		return free_vector;
	}
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	if (vector  == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;	/* fresh pool exhausted; recycle next time */

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}
336
/* Allocate an MSI vector and install its IDT interrupt gate. */
static int get_new_vector(void)
{
	int vector = assign_msi_vector();

	if (vector > 0)
		set_intr_gate(vector, interrupt[vector]);

	return vector;
}
346
/*
 * One-time MSI subsystem init: returns 0 immediately once it has
 * succeeded; failed attempts (other than the global quirk) are retried
 * on the next call.  Checks the chipset quirk, runs arch init,
 * requires registered msi_ops, creates the descriptor cache and primes
 * the released-vector accounting.
 */
static int msi_init(void)
{
	static int status = -ENOMEM;	/* cached result; 0 once initialized */

	if (!status)
		return status;

	/* global chipset quirk: MSI unusable on this system */
	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	status = msi_arch_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING
		       "PCI: MSI arch init failed.  MSI disabled.\n");
		return status;
	}

	if (! msi_ops) {
		printk(KERN_WARNING
		       "PCI: MSI ops not registered. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	/* probe that at least one device vector is still available */
	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	if (last_alloc_vector < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
		status = -EBUSY;
		return status;
	}
	/* return the probe vector to the recycle pool */
	vector_irq[last_alloc_vector] = 0;
	nr_released_vectors++;

	return status;
}
395
/* Per-device vector allocation hook (@dev is currently unused). */
static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}
400
/*
 * Allocate a zeroed msi_desc configured as a single-message entry.
 * Returns NULL on allocation failure.
 */
static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_alloc(msi_cachep, SLAB_KERNEL);
	if (!entry)
		return NULL;

	/* re-zero defensively (the cache ctor also zeroes objects) */
	memset(entry, 0, sizeof(struct msi_desc));
	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}
415
/* Publish @entry as the descriptor for @vector under msi_lock. */
static void attach_msi_entry(struct msi_desc *entry, int vector)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[vector] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}
424
425static void irq_handler_init(int cap_id, int pos, int mask)
426{
427	unsigned long flags;
428
429	spin_lock_irqsave(&irq_desc[pos].lock, flags);
430	if (cap_id == PCI_CAP_ID_MSIX)
431		irq_desc[pos].chip = &msix_irq_type;
432	else {
433		if (!mask)
434			irq_desc[pos].chip = &msi_irq_wo_maskbit_type;
435		else
436			irq_desc[pos].chip = &msi_irq_w_maskbit_type;
437	}
438	spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
439}
440
/*
 * Set the enable bit in the MSI or MSI-X control register (single
 * message for MSI) and record the mode on @dev.  PCI Express endpoints
 * additionally get legacy INTx disabled.
 */
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 1;
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 1;
	}
    	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);  /* disable intx */
	}
}
461
/*
 * Clear the enable bit in the MSI or MSI-X control register and record
 * the mode on @dev.  PCI Express endpoints get legacy INTx re-enabled.
 */
void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 0;
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 0;
	}
    	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);  /* enable intx */
	}
}
482
/*
 * Find a vector previously assigned to @dev for capability @type whose
 * saved default vector matches dev->irq.  On success dev->irq is
 * overridden with that vector and 0 is returned; otherwise -EACCES.
 * NOTE: mutates dev->irq — callers save/restore it around this call.
 */
static int msi_lookup_vector(struct pci_dev *dev, int type)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
		if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
			msi_desc[vector]->msi_attrib.type != type ||
			msi_desc[vector]->msi_attrib.default_vector != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* This pre-assigned MSI vector for this device
		   already exists. Override dev->irq with this vector */
		dev->irq = vector;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}
504
505void pci_scan_msi_device(struct pci_dev *dev)
506{
507	if (!dev)
508		return;
509
510   	if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
511		nr_msix_devices++;
512	else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
513		nr_reserved_vectors++;
514}
515
516#ifdef CONFIG_PM
/*
 * Snapshot the MSI capability (control, address, data and, if present,
 * mask dword) into a pci_cap_saved_state attached to @dev for later
 * restore.  Returns 0 when MSI is absent or not enabled (nothing to
 * save), -ENOMEM on allocation failure.
 */
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	/* worst case: control/addr_lo/addr_hi/data/mask = 5 dwords */
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;	/* control word is the upper half of dword 0 */
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}
554
/*
 * Rewrite the MSI capability registers from the state saved by
 * pci_save_msi_state(), re-enable MSI mode and free the saved state.
 * Silently returns if nothing was saved or the capability vanished.
 */
void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;	/* upper half of saved dword 0 */
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}
582
/*
 * Save the MSI-X control word into a pci_cap_saved_state and each
 * vector's table entry (address/data) into its msi_desc save fields,
 * walking the device's circular vector list.  Returns 0 when MSI-X is
 * absent/not enabled, -ENOMEM or -EINVAL on failure.
 */
int pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int temp;
	int vector, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	temp = dev->irq;	/* msi_lookup_vector() clobbers dev->irq */
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		kfree(save_state);
		return -EINVAL;
	}

	vector = head = dev->irq;
	while (head != tail) {	/* walk the circular vector list once */
		int j;
		void __iomem *base;
		struct msi_desc *entry;

		entry = msi_desc[vector];
		base = entry->mask_base;
		j = entry->msi_attrib.entry_nr;

		entry->address_lo_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		entry->address_hi_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		entry->data_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_DATA_OFFSET);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;	/* restore caller-visible irq */

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}
643
/*
 * Rewrite each MSI-X table entry from the values saved by
 * pci_save_msix_state(), restore the control word and re-enable MSI-X
 * mode.  Silently returns if nothing was saved or the lookup fails.
 */
void pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int vector, head, tail = 0;
	void __iomem *base;
	int j;
	struct msi_desc *entry;
	int temp;
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	temp = dev->irq;	/* msi_lookup_vector() clobbers dev->irq */
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
		return;
	vector = head = dev->irq;
	while (head != tail) {	/* walk the circular vector list once */
		entry = msi_desc[vector];
		base = entry->mask_base;
		j = entry->msi_attrib.entry_nr;

		writel(entry->address_lo_save,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(entry->address_hi_save,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(entry->data_save,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;	/* restore caller-visible irq */

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
694#endif
695
/*
 * Program @dev's MSI capability with the arch-provided message
 * address/data for vector dev->irq and, if the device supports
 * per-vector masking, mask all its messages.  Returns the (negative)
 * msi_ops->setup() status on failure, 0 on success.
 */
static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
	int status;
	u32 address_hi;
	u32 address_lo;
	u32 data;
	int pos, vector = dev->irq;
	u16 control;

   	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* Configure MSI capability structure */
	status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
	if (status < 0)
		return status;

	pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
	if (is_64bit_address(control)) {
		/* data register sits after the 64-bit address pair */
		pci_write_config_dword(dev,
			msi_upper_address_reg(pos), address_hi);
		pci_write_config_word(dev,
			msi_data_reg(pos, 1), data);
	} else
		pci_write_config_word(dev,
			msi_data_reg(pos, 0), data);
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		/* build a mask with one bit per supported message */
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}

	return 0;
}
738
739/**
740 * msi_capability_init - configure device's MSI capability structure
741 * @dev: pointer to the pci_dev data structure of MSI device function
742 *
743 * Setup the MSI capability structure of device function with a single
744 * MSI vector, regardless of device function is capable of handling
745 * multiple messages. A return of zero indicates the successful setup
746 * of an entry zero with the new MSI vector or non-zero for otherwise.
747 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int status;
	struct msi_desc *entry;
	int pos, vector;
	u16 control;

   	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	vector = get_msi_vector(dev);
	if (vector < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}
	entry->link.head = vector;
	entry->link.tail = vector;	/* self-linked: single-message list */
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;			/* Mark it not active */
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_vector = dev->irq;	/* Save IOAPIC IRQ */
	dev->irq = vector;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		/* for MSI, mask_base encodes the config-space offset of
		 * the mask register, not a mapped address */
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	status = msi_register_init(dev, entry);
	if (status != 0) {
		dev->irq = entry->msi_attrib.default_vector;	/* roll back */
		kmem_cache_free(msi_cachep, entry);
		return status;
	}

	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits	 */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	return 0;
}
796
797/**
798 * msix_capability_init - configure device's MSI-X capability
799 * @dev: pointer to the pci_dev data structure of MSI-X device function
800 * @entries: pointer to an array of struct msix_entry entries
801 * @nvec: number of @entries
802 *
803 * Setup the MSI-X capability structure of device function with a
804 * single MSI-X vector. A return of zero indicates the successful setup of
805 * requested MSI-X entries with allocated vectors or non-zero for otherwise.
806 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	u32 address_hi;
	u32 address_lo;
	u32 data;
	int status;
	int vector, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
 	u16 control;
	u8 bir;
	void __iomem *base;

   	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
 	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	/* table location = BAR indicated by BIR + offset */
 	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;
		vector = get_msi_vector(dev);
		if (vector < 0) {
			kmem_cache_free(msi_cachep, entry);
			break;
		}

 		j = entries[i].entry;
 		entries[i].vector = vector;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
 		entry->msi_attrib.state = 0;		/* Mark it not active */
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_vector = dev->irq;
		entry->dev = dev;
		entry->mask_base = base;
		/* link the new vector into the device's circular list */
		if (!head) {
			entry->link.head = vector;
			entry->link.tail = vector;
			head = entry;
		} else {
			entry->link.head = temp;	/* previous vector */
			entry->link.tail = tail->link.tail;
			tail->link.tail = vector;
			head->link.head = vector;
		}
		temp = vector;
		tail = entry;
		/* Replace with MSI-X handler */
		irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
		/* Configure MSI-X capability structure */
		status = msi_ops->setup(dev, vector,
					&address_hi,
					&address_lo,
					&data);
		if (status < 0)
			break;

		/* program this entry's slot in the MSI-X table */
		writel(address_lo,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(address_hi,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(data,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);
		attach_msi_entry(entry, vector);
	}
	if (i != nvec) {
		/* partial failure: unwind every vector set up so far
		 * (entries[i].vector was not assigned for the failing i) */
		i--;
		for (; i >= 0; i--) {
			vector = (entries + i)->vector;
			msi_free_vector(dev, vector, 0);
			(entries + i)->vector = 0;
		}
		return -EBUSY;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}
902
903/**
904 * pci_msi_supported - check whether MSI may be enabled on device
905 * @dev: pointer to the pci_dev data structure of MSI device function
906 *
907 * MSI must be globally enabled and supported by the device and its root
908 * bus. But, the root bus is not easy to find since some architectures
909 * have virtual busses on top of the PCI hierarchy (for instance the
910 * hypertransport bus), while the actual bus where MSI must be supported
911 * is below. So we test the MSI flag on all parent busses and assume
912 * that no quirk will ever set the NO_MSI flag on a non-root bus.
913 **/
914static
915int pci_msi_supported(struct pci_dev * dev)
916{
917	struct pci_bus *bus;
918
919	if (!pci_msi_enable || !dev || dev->no_msi)
920		return -EINVAL;
921
922	/* check MSI flags of all parent busses */
923	for (bus = dev->bus; bus; bus = bus->parent)
924		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
925			return -EINVAL;
926
927	return 0;
928}
929
930/**
931 * pci_enable_msi - configure device's MSI capability structure
932 * @dev: pointer to the pci_dev data structure of MSI device function
933 *
934 * Setup the MSI capability structure of device function with
935 * a single MSI vector upon its software driver call to request for
936 * MSI mode enabled on its hardware device function. A return of zero
937 * indicates the successful setup of an entry zero with the new MSI
938 * vector or non-zero for otherwise.
939 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, temp, status;
	u16 control;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	temp = dev->irq;	/* msi_lookup_vector() may clobber dev->irq */

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		/* Lookup Success: a vector was previously assigned to
		   this device; try to re-activate it */
		unsigned long flags;

		pci_read_config_word(dev, msi_control_reg(pos), &control);
		if (control & PCI_MSI_FLAGS_ENABLE)
			return 0;	/* Already in MSI mode */
		spin_lock_irqsave(&msi_lock, flags);
		if (!vector_irq[dev->irq]) {
			/* the old vector is still in the released pool:
			   reclaim it and reprogram the capability */
			msi_desc[dev->irq]->msi_attrib.state = 0;
			vector_irq[dev->irq] = -1;
			nr_released_vectors--;
			spin_unlock_irqrestore(&msi_lock, flags);
			status = msi_register_init(dev, msi_desc[dev->irq]);
			if (status == 0)
				enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
			return status;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		dev->irq = temp;
	}
	/* Check whether driver already requested for MSI-X vectors */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
			printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
			       "Device already has MSI-X vectors assigned\n",
			       pci_name(dev));
			dev->irq = temp;
			return -EINVAL;
	}
	status = msi_capability_init(dev);
	if (!status) {
   		if (!pos)
			nr_reserved_vectors--;	/* Only MSI capable */
		else if (nr_msix_devices > 0)
			nr_msix_devices--;	/* Both MSI and MSI-X capable,
						   but choose enabling MSI */
	}

	return status;
}
999
/*
 * Disable MSI mode for @dev, release its vector to the recycle pool
 * and restore dev->irq to the default pin-assertion vector.  Warns
 * (and BUGs) if the driver still holds the vector via request_irq().
 */
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;		/* MSI not enabled: nothing to do */

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		/* driver bug: vector still requested via request_irq() */
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI vector %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		vector_irq[dev->irq] = 0; /* free it */
		nr_released_vectors++;
		default_vector = entry->msi_attrib.default_vector;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* Restore dev->irq to its default pin-assertion vector */
		dev->irq = default_vector;
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
					PCI_CAP_ID_MSI);
	}
}
1043
/*
 * Tear down @vector for @dev: arch teardown, unlink it from the
 * device's circular vector list and drop its descriptor.  With
 * @reassign the vector stays claimed in vector_irq[] for immediate
 * re-use; otherwise it returns to the released pool.  For MSI-X the
 * table entry is masked (unless reassigning) and the table is
 * iounmapped when the list head itself is freed.
 */
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	msi_ops->teardown(vector);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	/* unlink from the circular vector list */
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	if (!reassign) {
		vector_irq[vector] = 0;
		nr_released_vectors++;
	}
	msi_desc[vector] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		if (!reassign)
			writel(1, base +
				entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		/* freeing the list head means the whole table is gone */
		if (head == vector)
			iounmap(base);
	}

	return 0;
}
1087
/*
 * Re-use a device's previously allocated MSI-X vectors for a new
 * request: mark them busy again and, where a requested entry number
 * differs from the saved one, move the address/data words to the new
 * table slot.  If more vectors are requested than were originally
 * allocated, shrink *nvec to the available count and return -EINVAL
 * so the caller can retry; returns 0 on success.
 */
static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
{
	int vector = head, tail = 0;
	int i, j = 0, nr_entries = 0;
	void __iomem *base;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	/* count the saved vectors; remember the one matching entries[0] */
	while (head != tail) {
		nr_entries++;
		tail = msi_desc[vector]->link.tail;
		if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
			j = vector;
		vector = tail;
	}
	if (*nvec > nr_entries) {
		spin_unlock_irqrestore(&msi_lock, flags);
		*nvec = nr_entries;
		return -EINVAL;
	}
	vector = ((j > 0) ? j : head);	/* start at the matching vector if found */
	for (i = 0; i < *nvec; i++) {
		j = msi_desc[vector]->msi_attrib.entry_nr;
		msi_desc[vector]->msi_attrib.state = 0;	/* Mark it not active */
		vector_irq[vector] = -1;		/* Mark it busy */
		nr_released_vectors--;
		entries[i].vector = vector;
		if (j != (entries + i)->entry) {
			/* requested a different table slot: copy the
			 * message over and refresh the vector in data */
			base = msi_desc[vector]->mask_base;
			msi_desc[vector]->msi_attrib.entry_nr =
				(entries + i)->entry;
			writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
			writel(	readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
			writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
				base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET);
		}
		vector = msi_desc[vector]->link.tail;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;
}
1138
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested vectors upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of vectors available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!entries || pci_msi_supported(dev) < 0)
 		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
 		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSIX_FLAGS_ENABLE)
		return -EINVAL;			/* Already in MSI-X mode */

	/* Hardware table size bounds the request */
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;	/* msi_lookup_vector() may clobber dev->irq */
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		/* Lookup Success: vectors were previously assigned, reuse them */
		nr_entries = nvec;
		/* Reroute MSI-X table */
		if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
			/* #requested > #previous-assigned */
			dev->irq = temp;
			return nr_entries;	/* > 0: available count */
		}
		dev->irq = temp;
		enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
		return 0;
	}
	/* Check whether driver already requested for MSI vector */
   	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is provided to ensure that enough vectors resources are
	 * available before granting.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
				nr_released_vectors);
	/* Ensure that each MSI/MSI-X device has one vector reserved by
	   default to avoid any MSI-X driver to take all available
 	   resources */
	free_vectors -= nr_reserved_vectors;
	/* Find the average of free vectors among MSI-X devices */
	if (nr_msix_devices > 0)
		free_vectors /= nr_msix_devices;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		/* > 0 tells the driver how many vectors it may re-request */
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);
	/* One fewer device competes for the remaining free vectors */
	if (!status && nr_msix_devices > 0)
		nr_msix_devices--;

	return status;
}
1242
/**
 * pci_disable_msix - shut down device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 *
 * Reverses pci_enable_msix(): releases every MSI-X vector assigned to
 * @dev back to the free pool and switches the device back to its
 * default pin-assertion IRQ.  Drivers must free_irq() all MSI-X
 * vectors first; leaked vectors trigger a warning followed by BUG_ON.
 * Silently returns if MSI is globally disabled, @dev is NULL, the
 * device has no MSI-X capability, or MSI-X is not currently enabled.
 **/
void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	temp = dev->irq;	/* msi_lookup_vector() may clobber dev->irq */
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		spin_lock_irqsave(&msi_lock, flags);
		/* Walk the circular vector list, freeing inactive vectors;
		 * a still-active vector means the driver leaked an irq */
		while (head != tail) {
			state = msi_desc[vector]->msi_attrib.state;
			if (state)
				warning = 1;
			else {
				vector_irq[vector] = 0; /* free it */
				nr_released_vectors++;
			}
			tail = msi_desc[vector]->link.tail;
			vector = tail;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		if (warning) {
			dev->irq = temp;
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			/* Deliberate crash: driver bug must not go unnoticed */
			BUG_ON(warning > 0);
		} else {
			dev->irq = temp;
			disable_msi_mode(dev,
				pci_find_capability(dev, PCI_CAP_ID_MSIX),
				PCI_CAP_ID_MSIX);

		}
	}
}
1295
/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Being called during hotplug remove, from which the device function
 * is hot-removed. All previous assigned MSI/MSI-X vectors, if
 * allocated for this device function, are reclaimed to unused state,
 * which may be used later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
 		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	/* Reclaim a plain MSI vector, if one was assigned */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			/* Driver never called free_irq(): fatal leak */
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
	/* Reclaim MSI-X vectors, if any were assigned */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		vector = head = dev->irq;
		/* Walk the circular list; free each inactive vector.  The
		 * head is freed last since msi_free_vector() unmaps the
		 * MSI-X table when the head itself goes away. */
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head) /* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		msi_free_vector(dev, vector, 0);
		if (warning) {
			/* Head was skipped above, so unmap the table here
			 * before crashing on the leaked vectors */
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
}
1357
/*
 * pci_no_msi - globally disable MSI/MSI-X support.
 * After this, pci_enable_msi()/pci_enable_msix() requests are refused
 * (pci_disable_msi()/pci_disable_msix() become no-ops as well).
 */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}
1362
/* Public MSI/MSI-X entry points available to modular drivers */
EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);
1367