intel-gtt.c revision f050a8abbda0efcd597c6b1825e3b9ce4d613383
1/*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphic devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18#include <linux/module.h>
19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/pagemap.h>
23#include <linux/agp_backend.h>
24#include <asm/smp.h>
25#include "agp.h"
26#include "intel-agp.h"
27#include <drm/intel-gtt.h>
28
29/*
30 * If we have Intel graphics, we're not going to have anything other than
31 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
32 * on the Intel IOMMU support (CONFIG_DMAR).
33 * Only newer chipsets need to bother with this, of course.
34 */
35#ifdef CONFIG_DMAR
36#define USE_PCI_DMA_API 1
37#else
38#define USE_PCI_DMA_API 0
39#endif
40
/*
 * Per-chipset driver vtable: hardware generation/feature flags plus the
 * hooks the generic GTT code calls into.
 */
struct intel_gtt_driver {
	unsigned int gen : 8;		/* chipset hardware generation */
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;	/* PGETBL enable bit is honoured */
	unsigned int dma_mask_size : 8;	/* presumably the pci dma mask width in bits — not used in this chunk */
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	/* Write a single pte at index 'entry'; 'flags' selects caching bits. */
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	/* Push chipset write buffers out to main memory. */
	void (*chipset_flush)(void);
};
60
/* Singleton driver state; this file only ever drives one GTT instance. */
static struct _intel_private {
	struct intel_gtt base;		/* part exported via drm/intel-gtt.h */
	const struct intel_gtt_driver *driver;	/* active chipset vtable */
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;	/* host bridge, holds GMCH config regs */
	u8 __iomem *registers;		/* mmio register window */
	phys_addr_t gtt_bus_addr;	/* bus address of the GTT ptes */
	phys_addr_t gma_bus_addr;	/* bus address of the graphics aperture */
	u32 PGETBL_save;		/* PGETBL_CTL value to restore on resume */
	u32 __iomem *gtt;		/* I915G */
	int num_dcache_entries;		/* i810 dedicated-vram pages (0 if none) */
	union {
		void __iomem *i9xx_flush_page;	/* gen3+: ioremapped flush page */
		void *i8xx_flush_page;		/* gen2: kmapped flush page */
	};
	char *i81x_gtt_table;		/* i81x: software-allocated gtt */
	struct page *i8xx_page;		/* backing page for i8xx_flush_page */
	struct resource ifp_resource;	/* bus space claimed for the flush page */
	int resource_valid;		/* ifp_resource must be released */
	struct page *scratch_page;	/* mapped into every unused gtt entry */
	dma_addr_t scratch_page_dma;	/* bus address of scratch_page */
} intel_private;
83
/* Shorthand accessors for the active chipset driver's feature bits. */
#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
89
90static void intel_agp_free_sglist(struct agp_memory *mem)
91{
92	struct sg_table st;
93
94	st.sgl = mem->sg_list;
95	st.orig_nents = st.nents = mem->page_count;
96
97	sg_free_table(&st);
98
99	mem->sg_list = NULL;
100	mem->num_sg = 0;
101}
102
103static int intel_agp_map_memory(struct agp_memory *mem)
104{
105	struct sg_table st;
106	struct scatterlist *sg;
107	int i;
108
109	if (mem->sg_list)
110		return 0; /* already mapped (for e.g. resume */
111
112	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
113
114	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
115		goto err;
116
117	mem->sg_list = sg = st.sgl;
118
119	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
120		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
121
122	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
123				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
124	if (unlikely(!mem->num_sg))
125		goto err;
126
127	return 0;
128
129err:
130	sg_free_table(&st);
131	return -ENOMEM;
132}
133
134static void intel_agp_unmap_memory(struct agp_memory *mem)
135{
136	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
137
138	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
139		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
140	intel_agp_free_sglist(mem);
141}
142
143static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
144{
145	return;
146}
147
/* Exists to support ARGB cursors */
/*
 * Allocate an order-2 (4 pages, 16KB) physically contiguous block below
 * 4GB and make it uncached. Returns NULL on failure.
 */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	/* Switch all 4 pages to uncached; on failure restore the
	 * write-back attribute before freeing. */
	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	/* extra reference, dropped again in i8xx_destroy_pages() */
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
166
/* Counterpart to i8xx_alloc_pages(): restore write-back caching, drop
 * the extra reference and free the order-2 block. NULL is a no-op. */
static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	/* restore cacheability before the pages go back to the allocator */
	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
177
178#define I810_GTT_ORDER 4
179static int i810_setup(void)
180{
181	u32 reg_addr;
182	char *gtt_table;
183
184	/* i81x does not preallocate the gtt. It's always 64kb in size. */
185	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
186	if (gtt_table == NULL)
187		return -ENOMEM;
188	intel_private.i81x_gtt_table = gtt_table;
189
190	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
191	reg_addr &= 0xfff80000;
192
193	intel_private.registers = ioremap(reg_addr, KB(64));
194	if (!intel_private.registers)
195		return -ENOMEM;
196
197	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
198	       intel_private.registers+I810_PGETBL_CTL);
199
200	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
201
202	if ((readl(intel_private.registers+I810_DRAM_CTL)
203		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
204		dev_info(&intel_private.pcidev->dev,
205			 "detected 4MB dedicated video ram\n");
206		intel_private.num_dcache_entries = 1024;
207	}
208
209	return 0;
210}
211
/* Undo i810_setup(): disable the page table, then free the software GTT. */
static void i810_cleanup(void)
{
	/* clear the enable bit before the backing pages disappear */
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}
217
/*
 * Bind the i810's dedicated video ram ("dcache") into the GTT starting
 * at @pg_start. Entry i maps dcache page i, so the address programmed
 * is a local dcache offset, not system memory.
 *
 * Returns 0 on success, -EINVAL if the range exceeds the dcache size.
 */
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int i;

	if ((pg_start + mem->page_count)
			> intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
		dma_addr_t addr = i << PAGE_SHIFT;
		intel_private.driver->write_entry(addr,
						  i, type);
	}
	/* posting read: make sure the pte writes have landed */
	readl(intel_private.gtt+i-1);

	return 0;
}
239
240/*
241 * The i810/i830 requires a physical address to program its mouse
242 * pointer into hardware.
243 * However the Xserver still writes to it through the agp aperture.
244 */
245static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
246{
247	struct agp_memory *new;
248	struct page *page;
249
250	switch (pg_count) {
251	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
252		break;
253	case 4:
254		/* kludge to get 4 physical pages for ARGB cursor */
255		page = i8xx_alloc_pages();
256		break;
257	default:
258		return NULL;
259	}
260
261	if (page == NULL)
262		return NULL;
263
264	new = agp_create_memory(pg_count);
265	if (new == NULL)
266		return NULL;
267
268	new->pages[0] = page;
269	if (pg_count == 4) {
270		/* kludge to get 4 physical pages for ARGB cursor */
271		new->pages[1] = new->pages[0] + 1;
272		new->pages[2] = new->pages[1] + 1;
273		new->pages[3] = new->pages[2] + 1;
274	}
275	new->page_count = pg_count;
276	new->num_scratch_pages = pg_count;
277	new->type = AGP_PHYS_MEMORY;
278	new->physical = page_to_phys(new->pages[0]);
279	return new;
280}
281
/*
 * Free agp memory handed out by intel_fake_agp_alloc_by_type(). For
 * AGP_PHYS_MEMORY the backing page(s) are released via the path that
 * matches how they were allocated (4-page cursor block vs single page).
 */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
298
299static int intel_gtt_setup_scratch_page(void)
300{
301	struct page *page;
302	dma_addr_t dma_addr;
303
304	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
305	if (page == NULL)
306		return -ENOMEM;
307	get_page(page);
308	set_pages_uc(page, 1);
309
310	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
311		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
312				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
313		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
314			return -EINVAL;
315
316		intel_private.scratch_page_dma = dma_addr;
317	} else
318		intel_private.scratch_page_dma = page_to_phys(page);
319
320	intel_private.scratch_page = page;
321
322	return 0;
323}
324
325static void i810_write_entry(dma_addr_t addr, unsigned int entry,
326			     unsigned int flags)
327{
328	u32 pte_flags = I810_PTE_VALID;
329
330	switch (flags) {
331	case AGP_DCACHE_MEMORY:
332		pte_flags |= I810_PTE_LOCAL;
333		break;
334	case AGP_USER_CACHED_MEMORY:
335		pte_flags |= I830_PTE_SYSTEM_CACHED;
336		break;
337	}
338
339	writel(addr | pte_flags, intel_private.gtt + entry);
340}
341
342static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
343	{32, 8192, 3},
344	{64, 16384, 4},
345	{128, 32768, 5},
346	{256, 65536, 6},
347	{512, 131072, 7},
348};
349
/*
 * Determine how much memory the BIOS carved out ("stolen") for the gpu,
 * in bytes. The encoding of the GMCH control register differs per
 * chipset family: i830/845G, SandyBridge (gen6) and everything else
 * each get their own decode. Returns 0 when nothing is pre-allocated.
 */
static unsigned int intel_gtt_stolen_size(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int stolen_size = 0;

	if (INTEL_GTT_GEN == 1)
		return 0; /* no stolen mem on i81x */

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			/* dedicated local memory: size is derived from the
			 * RDRAM channel type register */
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else if (INTEL_GTT_GEN == 6) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			stolen_size = MB(192);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			stolen_size = MB(288);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			stolen_size = MB(320);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			stolen_size = MB(384);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			stolen_size = MB(416);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			stolen_size = MB(448);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			stolen_size = MB(480);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			stolen_size = MB(512);
			break;
		}
	} else {
		/* i855 and later (pre-gen6) share the I855 encoding */
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
		       stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	return stolen_size;
}
500
/*
 * Reprogram the global GTT size field in PGETBL_CTL on i965-class
 * hardware. The per-process gtt (PGETBL_CTL2) is disabled first so
 * the size change cannot race with it.
 */
static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
	u32 pgetbl_ctl, pgetbl_ctl2;

	/* ensure that ppgtt is disabled */
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

	/* write the new ggtt size */
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
516
/*
 * Number of GTT entries on i965/G33/gen5 chipsets. On gen5 the desired
 * page table size is first programmed from the GMCH control word, then
 * the resulting PGETBL_CTL size field is decoded. Each entry is 4
 * bytes, hence the final size/4.
 */
static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

	/* 4 bytes per pte */
	return size/4;
}
572
/*
 * Total number of GTT entries for the active chipset. i965-class and
 * gen6 have dedicated decodes; older chipsets size the GTT to exactly
 * cover the aperture.
 */
static unsigned int intel_gtt_total_entries(void)
{
	int size;

	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
		return i965_gtt_total_entries();
	else if (INTEL_GTT_GEN == 6) {
		u16 snb_gmch_ctl;

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			size = MB(2);
			break;
		}
		/* 4 bytes per pte */
		return size/4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.base.gtt_mappable_entries;
	}
}
604
/*
 * Number of GTT entries covered by the CPU-visible aperture. gen1/2
 * decode the aperture size from chipset config registers; gen3+ just
 * read the length of the pci aperture BAR (resource 2).
 */
static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 1) {
		u32 smram_miscc;

		pci_read_config_dword(intel_private.bridge_dev,
				      I810_SMRAM_MISCC, &smram_miscc);

		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
				== I810_GFX_MEM_WIN_32M)
			aperture_size = MB(32);
		else
			aperture_size = MB(64);
	} else if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	/* one pte per aperture page */
	return aperture_size >> PAGE_SHIFT;
}
637
638static void intel_gtt_teardown_scratch_page(void)
639{
640	set_pages_wb(intel_private.scratch_page, 1);
641	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
642		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
643	put_page(intel_private.scratch_page);
644	__free_page(intel_private.scratch_page);
645}
646
/* Full teardown of intel_gtt_init(): chipset-specific cleanup first,
 * then the mmio/gtt mappings and finally the scratch page. */
static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}
656
/*
 * Common GTT initialization: run the chipset ->setup(), size the GTT,
 * save PGETBL_CTL for resume, map the pte array and allocate the
 * scratch page. Each failure path unwinds exactly what was set up
 * before it. Returns 0 on success or a negative errno.
 */
static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.base.gtt_total_entries = intel_gtt_total_entries();

	/* save the PGETBL reg for resume */
	intel_private.PGETBL_save =
		readl(intel_private.registers+I810_PGETBL_CTL)
			& ~I810_PGETBL_ENABLED;
	/* we only ever restore the register when enabling the PGTBL... */
	if (HAS_PGTBL_EN)
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
			"detected gtt size: %dK total, %dK mappable\n",
			intel_private.base.gtt_total_entries * 4,
			intel_private.base.gtt_mappable_entries * 4);

	/* 4 bytes per pte */
	gtt_map_size = intel_private.base.gtt_total_entries * 4;

	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
				    gtt_map_size);
	if (!intel_private.gtt) {
		/* scratch page not allocated yet; undo setup + registers map */
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	global_cache_flush();   /* FIXME: ? */

	intel_private.base.stolen_size = intel_gtt_stolen_size();

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	return 0;
}
704
705static int intel_fake_agp_fetch_size(void)
706{
707	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
708	unsigned int aper_size;
709	int i;
710
711	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
712		    / MB(1);
713
714	for (i = 0; i < num_sizes; i++) {
715		if (aper_size == intel_fake_agp_sizes[i].size) {
716			agp_bridge->current_size =
717				(void *) (intel_fake_agp_sizes + i);
718			return aper_size;
719		}
720	}
721
722	return 0;
723}
724
/* Undo intel_i830_setup_flush(): unmap, then free the chipset flush page. */
static void i830_cleanup(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}
733
/*
 * Allocate and kmap the page used by i830_chipset_flush(). Failure is
 * non-fatal: the flush mechanism simply stays disabled.
 */
static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		i830_cleanup();
}
748
/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out.  It appears to work.
 */
static void i830_chipset_flush(void)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	/* fill the 1KB write buffer (see comment above) */
	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
770
771static void i830_write_entry(dma_addr_t addr, unsigned int entry,
772			     unsigned int flags)
773{
774	u32 pte_flags = I810_PTE_VALID;
775
776	if (flags ==  AGP_USER_CACHED_MEMORY)
777		pte_flags |= I830_PTE_SYSTEM_CACHED;
778
779	writel(addr | pte_flags, intel_private.gtt + entry);
780}
781
/*
 * Enable the GTT: read the aperture base, flip the GMCH enable bit on
 * gen2 and restore the saved PGETBL_CTL value. Each enable is read back
 * to verify the hardware actually accepted it. Returns false when the
 * hardware refuses to enable.
 */
static bool intel_enable_gtt(void)
{
	u32 gma_addr;
	u8 __iomem *reg;

	if (INTEL_GTT_GEN <= 2)
		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
	else
		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
				      &gma_addr);

	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

	/* gen6 needs no explicit enable sequence */
	if (INTEL_GTT_GEN >= 6)
	    return true;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		/* read back to confirm the enable bit stuck */
		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	return true;
}
829
/*
 * Chipset setup for i830-class hardware: map the 64KB mmio window,
 * record the pte base address and set up the chipset flush page.
 * Returns 0 on success, -ENOMEM if the mmio window can't be mapped.
 */
static int i830_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;

	/* best-effort; flushing is simply disabled if this fails */
	intel_i830_setup_flush();

	return 0;
}
847
848static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
849{
850	agp_bridge->gatt_table_real = NULL;
851	agp_bridge->gatt_table = NULL;
852	agp_bridge->gatt_bus_addr = 0;
853
854	return 0;
855}
856
/* Nothing was allocated in intel_fake_agp_create_gatt_table(). */
static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
861
/*
 * Bridge ->configure hook: enable the GTT and point every entry at the
 * scratch page so nothing dangles. Returns 0 or -EIO when the hardware
 * refuses to enable.
 */
static int intel_fake_agp_configure(void)
{
	int i;

	if (!intel_enable_gtt())
	    return -EIO;

	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);	/* PCI Posting. */

	global_cache_flush();

	return 0;
}
881
882static bool i830_check_flags(unsigned int flags)
883{
884	switch (flags) {
885	case 0:
886	case AGP_PHYS_MEMORY:
887	case AGP_USER_CACHED_MEMORY:
888	case AGP_USER_MEMORY:
889		return true;
890	}
891
892	return false;
893}
894
/*
 * Program GTT entries from a DMA-mapped scatterlist, starting at
 * @pg_start. Scatterlist segments may cover several pages, but the GTT
 * needs one pte per page, so each segment is split back up.
 */
static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
					unsigned int sg_len,
					unsigned int pg_start,
					unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(sg_list, sg, sg_len, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr,
							  j, flags);
			j++;
		}
	}
	/* posting read to flush the pte writes */
	readl(intel_private.gtt+j-1);
}
919
/*
 * Bridge ->insert_memory hook: bind @mem into the GTT at @pg_start.
 * i810 dcache memory is special-cased; otherwise the pages are either
 * DMA-mapped (gen > 2 with the DMA API) or programmed by physical
 * address. Note the two labels: 'out' reports success, 'out_err' keeps
 * the prepared -EINVAL. mem->is_flushed is set on both paths.
 */
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i, j;
	int ret = -EINVAL;

	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		ret = intel_agp_map_memory(mem);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
	} else {
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			dma_addr_t addr = page_to_phys(mem->pages[i]);
			intel_private.driver->write_entry(addr,
							  j, type);
		}
		/* posting read to flush the pte writes */
		readl(intel_private.gtt+j-1);
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
966
/*
 * Bridge ->remove_memory hook: unbind @mem by pointing its GTT range
 * back at the scratch page, after tearing down any DMA mapping.
 */
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
		intel_agp_unmap_memory(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	/* posting read to flush the pte writes */
	readl(intel_private.gtt+i-1);

	return 0;
}
986
987static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
988						       int type)
989{
990	struct agp_memory *new;
991
992	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
993		if (pg_count != intel_private.num_dcache_entries)
994			return NULL;
995
996		new = agp_create_memory(1);
997		if (new == NULL)
998			return NULL;
999
1000		new->type = AGP_DCACHE_MEMORY;
1001		new->page_count = pg_count;
1002		new->num_scratch_pages = 0;
1003		agp_free_page_array(new);
1004		return new;
1005	}
1006	if (type == AGP_PHYS_MEMORY)
1007		return alloc_agpphysmem_i8xx(pg_count, type);
1008	/* always return NULL for other allocation types for now */
1009	return NULL;
1010}
1011
1012static int intel_alloc_chipset_flush_resource(void)
1013{
1014	int ret;
1015	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1016				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1017				     pcibios_align_resource, intel_private.bridge_dev);
1018
1019	return ret;
1020}
1021
1022static void intel_i915_setup_chipset_flush(void)
1023{
1024	int ret;
1025	u32 temp;
1026
1027	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1028	if (!(temp & 0x1)) {
1029		intel_alloc_chipset_flush_resource();
1030		intel_private.resource_valid = 1;
1031		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1032	} else {
1033		temp &= ~1;
1034
1035		intel_private.resource_valid = 1;
1036		intel_private.ifp_resource.start = temp;
1037		intel_private.ifp_resource.end = temp + PAGE_SIZE;
1038		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1039		/* some BIOSes reserve this area in a pnp some don't */
1040		if (ret)
1041			intel_private.resource_valid = 0;
1042	}
1043}
1044
1045static void intel_i965_g33_setup_chipset_flush(void)
1046{
1047	u32 temp_hi, temp_lo;
1048	int ret;
1049
1050	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1051	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1052
1053	if (!(temp_lo & 0x1)) {
1054
1055		intel_alloc_chipset_flush_resource();
1056
1057		intel_private.resource_valid = 1;
1058		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1059			upper_32_bits(intel_private.ifp_resource.start));
1060		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1061	} else {
1062		u64 l64;
1063
1064		temp_lo &= ~0x1;
1065		l64 = ((u64)temp_hi << 32) | temp_lo;
1066
1067		intel_private.resource_valid = 1;
1068		intel_private.ifp_resource.start = l64;
1069		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1070		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1071		/* some BIOSes reserve this area in a pnp some don't */
1072		if (ret)
1073			intel_private.resource_valid = 0;
1074	}
1075}
1076
/*
 * Set up the gen3+ chipset flush page: claim bus address space (or
 * adopt the BIOS-configured address) and ioremap it. Failure is
 * non-fatal — flushing is just disabled. gen6 needs no flush page.
 */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}
1103
1104static void i9xx_cleanup(void)
1105{
1106	if (intel_private.i9xx_flush_page)
1107		iounmap(intel_private.i9xx_flush_page);
1108	if (intel_private.resource_valid)
1109		release_resource(&intel_private.ifp_resource);
1110	intel_private.ifp_resource.start = 0;
1111	intel_private.resource_valid = 0;
1112}
1113
1114static void i9xx_chipset_flush(void)
1115{
1116	if (intel_private.i9xx_flush_page)
1117		writel(1, intel_private.i9xx_flush_page);
1118}
1119
/* Write one i965 pte; the physical address bits above 32 are folded
 * into pte bits 7:4. */
static void i965_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
}
1127
/* gen6 accepts every agp memory type; the flag bits are decoded in
 * gen6_write_entry() instead. */
static bool gen6_check_flags(unsigned int flags)
{
	return true;
}
1132
/*
 * Write one gen6 (SandyBridge) pte. The flags word carries an optional
 * GFDT bit on top of the memory type; the type selects the cache level
 * (uncached / LLC+MLC / LLC by default).
 */
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	/* split the GFDT bit off from the memory type */
	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
	u32 pte_flags;

	if (type_mask == AGP_USER_MEMORY)
		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	} else { /* set 'normal'/'cached' to LLC by default */
		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	}

	/* gen6 has bit11-4 for physical addr bit39-32 */
	addr |= (addr >> 28) & 0xff0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}
1156
/* Nothing chipset-specific to undo on gen6; the generic code unmaps the
 * mmio register file itself (see the ->cleanup() contract above). */
static void gen6_cleanup(void)
{
}
1160
1161static int i9xx_setup(void)
1162{
1163	u32 reg_addr;
1164
1165	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
1166
1167	reg_addr &= 0xfff80000;
1168
1169	intel_private.registers = ioremap(reg_addr, 128 * 4096);
1170	if (!intel_private.registers)
1171		return -ENOMEM;
1172
1173	if (INTEL_GTT_GEN == 3) {
1174		u32 gtt_addr;
1175
1176		pci_read_config_dword(intel_private.pcidev,
1177				      I915_PTEADDR, &gtt_addr);
1178		intel_private.gtt_bus_addr = gtt_addr;
1179	} else {
1180		u32 gtt_offset;
1181
1182		switch (INTEL_GTT_GEN) {
1183		case 5:
1184		case 6:
1185			gtt_offset = MB(2);
1186			break;
1187		case 4:
1188		default:
1189			gtt_offset =  KB(512);
1190			break;
1191		}
1192		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1193	}
1194
1195	intel_i9xx_setup_flush();
1196
1197	return 0;
1198}
1199
/* The fake agp bridge presented to the agp core (see the comment at the
 * top of the file): real GTT handling behind the standard agp_bridge_driver
 * interface, with generic helpers for plain page allocation. */
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
};
1221
/* Gen1 (i810/i815): 32-bit addressing, i810-style PTEs. */
static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
	.has_pgtbl_enable = 1,
	.dma_mask_size = 32,
	.setup = i810_setup,
	.cleanup = i810_cleanup,
	.check_flags = i830_check_flags,
	.write_entry = i810_write_entry,
};
/* Gen2 (830M..865): i830-style PTEs and chipset flush. */
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.has_pgtbl_enable = 1,
	.setup = i830_setup,
	.cleanup = i830_cleanup,
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
/* Gen3 (915/945): i9xx setup/flush, still 32-bit i830-style PTEs. */
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
/* G33/Q33/Q35: gen3 core but 36-bit addressing via i965-style PTEs. */
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
/* Pineview (GMA3150): a G33 variant, hence both flags set. */
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
/* Gen4 (946GZ..965GME): i965 PTEs, 36-bit addressing. */
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
/* G4x (GM45/G45/G41/B43 etc.), treated as gen5 for GTT purposes. */
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
/* Ironlake: gen5 with its own flag for chipset-specific quirks. */
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
/* Sandybridge: gen6 PTE format with cacheability bits, 40-bit addressing. */
static const struct intel_gtt_driver sandybridge_gtt_driver = {
	.gen = 6,
	.setup = i9xx_setup,
	.cleanup = gen6_cleanup,
	.write_entry = gen6_write_entry,
	.dma_mask_size = 40,
	.check_flags = gen6_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
1310
/* Table of supported Intel GMCH chipsets.  Every entry carries a non-NULL
 * gtt_driver; find_gmch() probes the PCI bus for each gmch_chip_id in turn
 * to determine which entry (if any) matches the present hardware.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;	/* PCI device id of the IGD function */
	char *name;			/* human-readable chipset name */
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	/* Terminator - iteration in intel_gmch_probe() stops at name == NULL. */
	{ 0, NULL, NULL }
};
1406
1407static int find_gmch(u16 device)
1408{
1409	struct pci_dev *gmch_device;
1410
1411	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1412	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1413		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1414					     device, gmch_device);
1415	}
1416
1417	if (!gmch_device)
1418		return 0;
1419
1420	intel_private.pcidev = gmch_device;
1421	return 1;
1422}
1423
1424int intel_gmch_probe(struct pci_dev *pdev,
1425				      struct agp_bridge_data *bridge)
1426{
1427	int i, mask;
1428	intel_private.driver = NULL;
1429
1430	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1431		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1432			intel_private.driver =
1433				intel_gtt_chipsets[i].gtt_driver;
1434			break;
1435		}
1436	}
1437
1438	if (!intel_private.driver)
1439		return 0;
1440
1441	bridge->driver = &intel_fake_agp_driver;
1442	bridge->dev_private_data = &intel_private;
1443	bridge->dev = pdev;
1444
1445	intel_private.bridge_dev = pci_dev_get(pdev);
1446
1447	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1448
1449	mask = intel_private.driver->dma_mask_size;
1450	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1451		dev_err(&intel_private.pcidev->dev,
1452			"set gfx device dma mask %d-bit failed!\n", mask);
1453	else
1454		pci_set_consistent_dma_mask(intel_private.pcidev,
1455					    DMA_BIT_MASK(mask));
1456
1457	/*if (bridge->driver == &intel_810_driver)
1458		return 1;*/
1459
1460	if (intel_gtt_init() != 0)
1461		return 0;
1462
1463	return 1;
1464}
1465EXPORT_SYMBOL(intel_gmch_probe);
1466
/* Hand out the shared GTT description (struct intel_gtt, see
 * <drm/intel-gtt.h>) embedded in intel_private, for use by the drm
 * driver.  The returned pointer refers to driver-private storage and
 * must not be freed by the caller. */
const struct intel_gtt *intel_gtt_get(void)
{
	return &intel_private.base;
}
1471EXPORT_SYMBOL(intel_gtt_get);
1472
1473void intel_gtt_chipset_flush(void)
1474{
1475	if (intel_private.driver->chipset_flush)
1476		intel_private.driver->chipset_flush();
1477}
1478EXPORT_SYMBOL(intel_gtt_chipset_flush);
1479
1480void intel_gmch_remove(struct pci_dev *pdev)
1481{
1482	if (intel_private.pcidev)
1483		pci_dev_put(intel_private.pcidev);
1484	if (intel_private.bridge_dev)
1485		pci_dev_put(intel_private.bridge_dev);
1486}
1487EXPORT_SYMBOL(intel_gmch_remove);
1488
1489MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
1490MODULE_LICENSE("GPL and additional rights");
1491