msi.c revision fd58e55fcf5568e51da2ed54d7acd049c3fdb184
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/config.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;

static int pci_msi_enable = 1;
static int last_alloc_vector;
static int nr_released_vectors;
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
static int nr_msix_devices;

#ifndef CONFIG_X86_IO_APIC
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
#endif

static struct msi_ops *msi_ops;

int
msi_register(struct msi_ops *ops)
{
	msi_ops = ops;
	return 0;
}

static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
{
	memset(p, 0, NR_IRQS * sizeof(struct msi_desc));
}

static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache",
			NR_IRQS * sizeof(struct msi_desc),
			0, SLAB_HWCACHE_ALIGN, msi_cache_ctor, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

static void msi_set_mask_bit(unsigned int vector, int flag)
{
	struct msi_desc *entry;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || !entry->mask_base)
		return;
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int		pos;
		u32		mask_bits;

		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~(1);
		mask_bits |= flag;
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}

#ifdef CONFIG_SMP
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	u32 address_hi, address_lo;
	unsigned int irq = vector;
	unsigned int dest_cpu = first_cpu(cpu_mask);

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev)
		return;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);

		if (!pos)
			return;

		pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
			&address_hi);
		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
			&address_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
			address_hi);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
			address_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset_hi =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
		int offset_lo =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		address_hi = readl(entry->mask_base + offset_hi);
		address_lo = readl(entry->mask_base + offset_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		writel(address_hi, entry->mask_base + offset_hi);
		writel(address_lo, entry->mask_base + offset_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	default:
		break;
	}
}
#else
#define set_msi_affinity NULL
#endif /* CONFIG_SMP */

static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}

static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}

static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;	/* never anything pending */
}

static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	startup_msi_irq_wo_maskbit(vector);
	unmask_MSI_irq(vector);
	return 0;	/* never anything pending */
}

static void shutdown_msi_irq(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (entry && entry->dev)
		entry->msi_attrib.state = 0;	/* Mark it not active */
	spin_unlock_irqrestore(&msi_lock, flags);
}

static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	ack_APIC_irq();
}

static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	unmask_MSI_irq(vector);
	ack_APIC_irq();
}

static void do_nothing(unsigned int vector)
{
}

/*
 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI-X Capability Structure.
 */
static struct hw_interrupt_type msix_irq_type = {
	.typename	= "PCI-MSI-X",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure with
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure without
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_wo_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= do_nothing,
	.disable	= do_nothing,
	.ack		= do_nothing,
	.end		= end_msi_irq_wo_maskbit,
	.set_affinity	= set_msi_affinity
};

static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;
	int vector;
	unsigned long flags;

	/*
	 * msi_lock ensures that each MSI vector allocated here is unique
	 * among all drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);

	if (!new_vector_avail) {
		int free_vector = 0;

		/*
		 * vector_irq[] = -1 indicates that this specific vector is:
		 * - assigned for MSI (since MSIs have no associated IRQs) or
		 * - assigned for legacy if less than 16, or
		 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
		 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, was freed by a hotplug remove operation.
		 * Such a vector will be reused by any subsequent hotplug add
		 * operation.
		 * vector_irq[] > 0 indicates that this vector is assigned to
		 * an IOxAPIC IRQ. The vector and its value provide a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
		 * (See the illustrative sketch after this function.)
		 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			if (!msi_desc[vector])
				break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		vector_irq[free_vector] = -1;
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail;

			/* free all linked vectors before re-assign */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}

		return free_vector;
	}
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	if (vector == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}
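
/*
 * Illustrative sketch only (not compiled): how the vector_irq[] state
 * encoding described in assign_msi_vector() above could be read back.
 * The enum and helper names below are hypothetical and used nowhere else.
 */
#if 0
enum msi_vector_state {
	MSI_VEC_IOAPIC,		/* vector_irq[v] > 0: 1:1 vector-to-IOxAPIC IRQ */
	MSI_VEC_IN_USE,		/* vector_irq[v] == -1: MSI, legacy (< 16) or unmapped */
	MSI_VEC_RELEASED,	/* vector_irq[v] == 0: freed by hotplug remove, reusable */
};

static enum msi_vector_state msi_vector_state(int vector)
{
	if (vector_irq[vector] > 0)
		return MSI_VEC_IOAPIC;
	return vector_irq[vector] ? MSI_VEC_IN_USE : MSI_VEC_RELEASED;
}
#endif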

static int get_new_vector(void)
{
	int vector = assign_msi_vector();

	if (vector > 0)
		set_intr_gate(vector, interrupt[vector]);

	return vector;
}

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	status = msi_arch_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING
		       "PCI: MSI arch init failed.  MSI disabled.\n");
		return status;
	}

	if (!msi_ops) {
		printk(KERN_WARNING
		       "PCI: MSI ops not registered. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	if (last_alloc_vector < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
		status = -EBUSY;
		return status;
	}
	vector_irq[last_alloc_vector] = 0;
	nr_released_vectors++;

	return status;
}

static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_alloc(msi_cachep, SLAB_KERNEL);
	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(struct msi_desc));
	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

static void attach_msi_entry(struct msi_desc *entry, int vector)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[vector] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}

static void irq_handler_init(int cap_id, int pos, int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_desc[pos].lock, flags);
	if (cap_id == PCI_CAP_ID_MSIX)
		irq_desc[pos].handler = &msix_irq_type;
	else {
		if (!mask)
			irq_desc[pos].handler = &msi_irq_wo_maskbit_type;
		else
			irq_desc[pos].handler = &msi_irq_w_maskbit_type;
	}
	spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
}

static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);  /* disable intx */
	}
}

void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Clear the MSI_enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);  /* enable intx */
	}
}

static int msi_lookup_vector(struct pci_dev *dev, int type)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
		if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
			msi_desc[vector]->msi_attrib.type != type ||
			msi_desc[vector]->msi_attrib.default_vector != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* A pre-assigned MSI vector for this device
		   already exists. Override dev->irq with this vector */
		dev->irq = vector;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}

void pci_scan_msi_device(struct pci_dev *dev)
{
	if (!dev)
		return;

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
		nr_msix_devices++;
	else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
		nr_reserved_vectors++;
}

#ifdef CONFIG_PM
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

int pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int temp;
	int vector, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	temp = dev->irq;
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		kfree(save_state);
		return -EINVAL;
	}

	vector = head = dev->irq;
	while (head != tail) {
		int j;
		void __iomem *base;
		struct msi_desc *entry;

		entry = msi_desc[vector];
		base = entry->mask_base;
		j = entry->msi_attrib.entry_nr;

		entry->address_lo_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		entry->address_hi_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		entry->data_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_DATA_OFFSET);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

void pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int vector, head, tail = 0;
	void __iomem *base;
	int j;
	struct msi_desc *entry;
	int temp;
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	temp = dev->irq;
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
		return;
	vector = head = dev->irq;
	while (head != tail) {
		entry = msi_desc[vector];
		base = entry->mask_base;
		j = entry->msi_attrib.entry_nr;

		writel(entry->address_lo_save,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(entry->address_hi_save,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(entry->data_save,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
#endif

static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
	int status;
	u32 address_hi;
	u32 address_lo;
	u32 data;
	int pos, vector = dev->irq;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* Configure MSI capability structure */
	status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
	if (status < 0)
		return status;

	pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
	if (is_64bit_address(control)) {
		pci_write_config_dword(dev,
			msi_upper_address_reg(pos), address_hi);
		pci_write_config_word(dev,
			msi_data_reg(pos, 1), data);
	} else
		pci_write_config_word(dev,
			msi_data_reg(pos, 0), data);
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}

	return 0;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a single
 * MSI vector, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates successful setup
 * of entry zero with the new MSI vector; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int status;
	struct msi_desc *entry;
	int pos, vector;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	vector = get_msi_vector(dev);
	if (vector < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}
	entry->link.head = vector;
	entry->link.tail = vector;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;			/* Mark it not active */
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_vector = dev->irq;	/* Save IOAPIC IRQ */
	dev->irq = vector;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	status = msi_register_init(dev, entry);
	if (status != 0) {
		dev->irq = entry->msi_attrib.default_vector;
		kmem_cache_free(msi_cachep, entry);
		return status;
	}

	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X vectors. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated vectors; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	u32 address_hi;
	u32 address_lo;
	u32 data;
	int status;
	int vector, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;
		vector = get_msi_vector(dev);
		if (vector < 0) {
			kmem_cache_free(msi_cachep, entry);
			break;
		}

		j = entries[i].entry;
		entries[i].vector = vector;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.state = 0;		/* Mark it not active */
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_vector = dev->irq;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = vector;
			entry->link.tail = vector;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = vector;
			head->link.head = vector;
		}
		temp = vector;
		tail = entry;
		/* Replace with MSI-X handler */
		irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
		/* Configure MSI-X capability structure */
		status = msi_ops->setup(dev, vector,
					&address_hi,
					&address_lo,
					&data);
		if (status < 0)
			break;

		writel(address_lo,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(address_hi,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(data,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);
		attach_msi_entry(entry, vector);
	}
	if (i != nvec) {
		i--;
		for (; i >= 0; i--) {
			vector = (entries + i)->vector;
			msi_free_vector(dev, vector, 0);
			(entries + i)->vector = 0;
		}
		return -EBUSY;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with
 * a single MSI vector when its driver requests MSI mode for the
 * hardware device function. A return of zero indicates successful
 * setup of entry zero with the new MSI vector; a non-zero return
 * indicates failure.
 * (An illustrative usage sketch follows this function.)
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, temp, status = -EINVAL;
	u16 control;

	if (!pci_msi_enable || !dev)
		return status;

	if (dev->no_msi)
		return status;

	if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
		return -EINVAL;

	temp = dev->irq;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSI_FLAGS_ENABLE)
		return 0;			/* Already in MSI mode */

	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		/* Lookup Success */
		unsigned long flags;

		spin_lock_irqsave(&msi_lock, flags);
		if (!vector_irq[dev->irq]) {
			msi_desc[dev->irq]->msi_attrib.state = 0;
			vector_irq[dev->irq] = -1;
			nr_released_vectors--;
			spin_unlock_irqrestore(&msi_lock, flags);
			status = msi_register_init(dev, msi_desc[dev->irq]);
			if (status == 0)
				enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
			return status;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		dev->irq = temp;
	}
	/* Check whether driver already requested MSI-X vectors */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
		       "Device already has MSI-X vectors assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	if (!status) {
		if (!pos)
			nr_reserved_vectors--;	/* Only MSI capable */
		else if (nr_msix_devices > 0)
			nr_msix_devices--;	/* Both MSI and MSI-X capable,
						   but choose enabling MSI */
	}

	return status;
}
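
/*
 * Illustrative usage sketch (not compiled, not part of this interface): how a
 * driver would typically request and release a single MSI vector.  The names
 * foo_setup_irq, foo_teardown_irq and foo_interrupt are hypothetical.  Note
 * the ordering on teardown: free_irq() must come before pci_disable_msi(), as
 * the warning in pci_disable_msi() below enforces.
 */
#if 0
static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs);

static int foo_setup_irq(struct pci_dev *dev)
{
	/* On failure dev->irq still holds the pin-based IRQ, so the
	 * driver simply falls back to INTx. */
	if (pci_enable_msi(dev))
		printk(KERN_INFO "PCI: %s: falling back to INTx\n",
		       pci_name(dev));

	return request_irq(dev->irq, foo_interrupt, 0, "foo", dev);
}

static void foo_teardown_irq(struct pci_dev *dev)
{
	free_irq(dev->irq, dev);
	pci_disable_msi(dev);	/* restores dev->irq to the pin-based vector */
}
#endif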

void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI vector %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		vector_irq[dev->irq] = 0; /* free it */
		nr_released_vectors++;
		default_vector = entry->msi_attrib.default_vector;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* Restore dev->irq to its default pin-assertion vector */
		dev->irq = default_vector;
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
					PCI_CAP_ID_MSI);
	}
}

static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	msi_ops->teardown(vector);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	if (!reassign) {
		vector_irq[vector] = 0;
		nr_released_vectors++;
	}
	msi_desc[vector] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		if (!reassign)
			writel(1, base +
				entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == vector) {
			/*
			 * Detect last MSI-X vector to be released.
			 * Release the MSI-X memory-mapped table.
			 */
#if 0
			int pos, nr_entries;
			unsigned long phys_addr;
			u32 table_offset;
			u16 control;
			u8 bir;

			pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			nr_entries = multi_msix_capable(control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
			phys_addr = pci_resource_start(dev, bir) + table_offset;
/*
 * FIXME!  and what did you want to do with phys_addr?
 */
#endif
			iounmap(base);
		}
	}

	return 0;
}

static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
{
	int vector = head, tail = 0;
	int i, j = 0, nr_entries = 0;
	void __iomem *base;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	while (head != tail) {
		nr_entries++;
		tail = msi_desc[vector]->link.tail;
		if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
			j = vector;
		vector = tail;
	}
	if (*nvec > nr_entries) {
		spin_unlock_irqrestore(&msi_lock, flags);
		*nvec = nr_entries;
		return -EINVAL;
	}
	vector = ((j > 0) ? j : head);
	for (i = 0; i < *nvec; i++) {
		j = msi_desc[vector]->msi_attrib.entry_nr;
		msi_desc[vector]->msi_attrib.state = 0;	/* Mark it not active */
		vector_irq[vector] = -1;		/* Mark it busy */
		nr_released_vectors--;
		entries[i].vector = vector;
		if (j != (entries + i)->entry) {
			base = msi_desc[vector]->mask_base;
			msi_desc[vector]->msi_attrib.entry_nr =
				(entries + i)->entry;
			writel(readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
			writel(readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
			writel((readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
				base + (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET);
		}
		vector = msi_desc[vector]->link.tail;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested vectors when its driver requests MSI-X mode for the
 * hardware device function. A return of zero indicates successful
 * configuration of the MSI-X capability structure with the newly allocated
 * MSI-X vectors. A return of < 0 indicates a failure. A return of > 0
 * indicates that the request exceeds the number of available vectors;
 * the driver should retry with the returned value as the new request.
 * (An illustrative retry sketch follows this function.)
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable || !dev || !entries)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSIX_FLAGS_ENABLE)
		return -EINVAL;			/* Already in MSI-X mode */

	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		/* Lookup Success */
		nr_entries = nvec;
		/* Reroute MSI-X table */
		if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
			/* #requested > #previously-assigned */
			dev->irq = temp;
			return nr_entries;
		}
		dev->irq = temp;
		enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
		return 0;
	}
	/* Check whether driver already requested an MSI vector */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is provided to ensure that enough vector resources are
	 * available before granting.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
				nr_released_vectors);
	/* Ensure that each MSI/MSI-X device has one vector reserved by
	   default, to prevent any single MSI-X driver from taking all
	   available resources */
	free_vectors -= nr_reserved_vectors;
	/* Find the average of free vectors among MSI-X devices */
	if (nr_msix_devices > 0)
		free_vectors /= nr_msix_devices;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);
	if (!status && nr_msix_devices > 0)
		nr_msix_devices--;

	return status;
}
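
/*
 * Illustrative usage sketch (not compiled): the retry pattern the kernel-doc
 * above describes for pci_enable_msix().  A positive return value is the
 * number of vectors currently available, so the driver may retry with that
 * smaller count.  FOO_NVEC, foo_enable_msix and the entries array are
 * hypothetical names.
 */
#if 0
#define FOO_NVEC	4

static int foo_enable_msix(struct pci_dev *dev, struct msix_entry *entries)
{
	int i, err, nvec = FOO_NVEC;

	for (i = 0; i < FOO_NVEC; i++)
		entries[i].entry = i;

	while (nvec > 0) {
		err = pci_enable_msix(dev, entries, nvec);
		if (err == 0)
			return nvec;	/* entries[0..nvec-1].vector are valid */
		if (err < 0)
			return err;	/* hard failure */
		nvec = err;		/* fewer vectors available; retry */
	}
	return -EBUSY;
}
#endif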

void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		spin_lock_irqsave(&msi_lock, flags);
		while (head != tail) {
			state = msi_desc[vector]->msi_attrib.state;
			if (state)
				warning = 1;
			else {
				vector_irq[vector] = 0; /* free it */
				nr_released_vectors++;
			}
			tail = msi_desc[vector]->link.tail;
			vector = tail;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		if (warning) {
			dev->irq = temp;
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		} else {
			dev->irq = temp;
			disable_msi_mode(dev,
				pci_find_capability(dev, PCI_CAP_ID_MSIX),
				PCI_CAP_ID_MSIX);
		}
	}
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug remove, when the device function is hot-removed.
 * All MSI/MSI-X vectors previously assigned to this device function are
 * reclaimed to the unused state, so they may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		vector = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head) /* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		msi_free_vector(dev, vector, 0);
		if (warning) {
			/* Force release of the MSI-X memory-mapped table */
#if 0
			unsigned long phys_addr;
			u32 table_offset;
			u16 control;
			u8 bir;

			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
			phys_addr = pci_resource_start(dev, bir) + table_offset;
/*
 * FIXME! and what did you want to do with phys_addr?
 */
#endif
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);
