intel-gtt.c revision e9b1cc81c2222108d866323c51f482dd6db8d689
/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphics devices
 * sitting on an agp port. So it made sense to fake the GTT support as an agp
 * port to avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, it just needlessly
 * complicates the code. But as long as the old graphics stack is still
 * supported, it's stuck here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <linux/intel-gtt.h>
#include <drm/intel-gtt.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
EXPORT_SYMBOL(intel_max_stolen);

static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};

#define AGP_DCACHE_MEMORY	1
#define AGP_PHYS_MEMORY		2
#define INTEL_AGP_CACHED_MEMORY 3

static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};

#define INTEL_AGP_UNCACHED_MEMORY              0
#define INTEL_AGP_CACHED_MEMORY_LLC            1
#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4

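/*
 * Per-chipset-family vtable: each supported family fills in one of these.
 * The concrete instances (i8xx_gtt_driver, i915_gtt_driver, ...) are
 * defined near the bottom of this file, next to intel_gtt_chipsets[].
 */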
struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};

static struct _intel_private {
	struct intel_gtt base;
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_bus_addr;
	phys_addr_t gma_bus_addr;
	phys_addr_t pte_bus_addr;
	u32 __iomem *gtt;		/* I915G */
	int num_dcache_entries;
	union {
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	dma_addr_t scratch_page_dma;
} intel_private;

#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake

static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}

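/*
 * Build a scatterlist with one entry per page and hand it to the PCI DMA
 * API. Note that pci_map_sg() is free to coalesce adjacent pages into
 * fewer DMA segments, which is why intel_gtt_insert_sg_entries() below
 * walks each segment page by page when writing the GTT entries.
 */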
static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	if (mem->sg_list)
		return 0; /* already mapped (e.g. for resume) */

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		goto err;

	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg))
		goto err;

	return 0;

err:
	sg_free_table(&st);
	return -ENOMEM;
}

static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}

static int intel_i810_fetch_size(void)
{
	u32 smram_miscc;
	struct aper_size_info_fixed *values;

	pci_read_config_dword(intel_private.bridge_dev,
			      I810_SMRAM_MISCC, &smram_miscc);
	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
		return 0;
	}
	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	} else {
		agp_bridge->current_size = (void *) (values);
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}
}

static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}

static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
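/* Grabs an order-2 (4 page, 16KB) block and switches it to uncached, so
 * that CPU writes to the cursor image reach memory where the non-snooping
 * GPU will read them (see the chipset_flush comment further down). */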
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}

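/*
 * Bind pages into the i810 GTT: each PTE is the page's physical address
 * OR'ed with the mask bits for the memory type (see intel_i810_masks),
 * written at I810_PTE_BASE + 4*entry; the trailing readl() flushes the
 * posted PTE writes.
 */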
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}

static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

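/*
 * The scratch page is what every unbound GTT entry points at (see
 * intel_fake_agp_configure() and intel_fake_agp_remove_entries()), so that
 * stray GPU accesses hit a harmless uncached page instead of random memory.
 */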
static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
			return -EINVAL;

		intel_private.scratch_page_dma = dma_addr;
	} else
		intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}

static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

static unsigned int intel_gtt_stolen_entries(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int overhead_entries, stolen_entries;
	unsigned int stolen_size = 0;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
		overhead_entries = 0;
	else
		overhead_entries = intel_private.base.gtt_mappable_entries
			/ 1024;

	overhead_entries += 1; /* BIOS popup */

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else if (INTEL_GTT_GEN == 6) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			stolen_size = MB(192);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			stolen_size = MB(288);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			stolen_size = MB(320);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			stolen_size = MB(384);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			stolen_size = MB(416);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			stolen_size = MB(448);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			stolen_size = MB(480);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			stolen_size = MB(512);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (!local && stolen_size > intel_max_stolen) {
		dev_info(&intel_private.bridge_dev->dev,
			 "detected %dK stolen memory, trimming to %dK\n",
			 stolen_size / KB(1), intel_max_stolen / KB(1));
		stolen_size = intel_max_stolen;
	} else if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
		       stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	stolen_entries = stolen_size/KB(4) - overhead_entries;

	return stolen_entries;
}

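/*
 * Each GTT entry is a 32-bit PTE, so the size/4 conversions below turn the
 * page table size in bytes into a number of entries.
 */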
static unsigned int intel_gtt_total_entries(void)
{
	int size;

	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = KB(128);
			break;
		case I965_PGETBL_SIZE_256KB:
			size = KB(256);
			break;
		case I965_PGETBL_SIZE_512KB:
			size = KB(512);
			break;
		case I965_PGETBL_SIZE_1MB:
			size = KB(1024);
			break;
		case I965_PGETBL_SIZE_2MB:
			size = KB(2048);
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = KB(1024 + 512);
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = KB(512);
		}

		return size/4;
	} else if (INTEL_GTT_GEN == 6) {
		u16 snb_gmch_ctl;

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			size = MB(2);
			break;
		}
		return size/4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.base.gtt_mappable_entries;
	}
}

static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(intel_private.scratch_page);
	__free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}

static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.base.gtt_total_entries = intel_gtt_total_entries();

	gtt_map_size = intel_private.base.gtt_total_entries * 4;

	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
				    gtt_map_size);
	if (!intel_private.gtt) {
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	global_cache_flush();   /* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
	if (intel_private.base.gtt_stolen_entries == 0) {
		iounmap(intel_private.registers);
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	return 0;
}

static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
		    / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}

static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}

static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out.  It appears to work.
 */
static void i830_chipset_flush(void)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel(addr | pte_flags, intel_private.gtt + entry);
}

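/*
 * Turn the GTT on: latch the aperture base from the GMADDR BAR, set the
 * enable bit in the bridge's GMCH control word and point PGETBL_CTL at
 * the page table, with a readback to flush the posted write.
 */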
static void intel_enable_gtt(void)
{
	u32 gma_addr;
	u16 gmch_ctrl;

	if (INTEL_GTT_GEN == 2)
		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
	else
		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
				      &gma_addr);

	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
}

static int i830_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
	intel_private.pte_bus_addr =
		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;

	intel_i830_setup_flush();

	return 0;
}

static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	int i;

	intel_enable_gtt();

	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	for (i = intel_private.base.gtt_stolen_entries;
			i < intel_private.base.gtt_total_entries; i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);	/* PCI Posting. */

	global_cache_flush();

	return 0;
}

static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}

static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
					unsigned int sg_len,
					unsigned int pg_start,
					unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(sg_list, sg, sg_len, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr,
							  j, flags);
			j++;
		}
	}
	readl(intel_private.gtt+j-1);
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i, j;
	int ret = -EINVAL;

	if (mem->page_count == 0)
		goto out;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
			   pg_start, intel_private.base.gtt_stolen_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		ret = intel_agp_map_memory(mem);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
	} else {
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			dma_addr_t addr = page_to_phys(mem->pages[i]);
			intel_private.driver->write_entry(addr,
							  j, type);
		}
		readl(intel_private.gtt+j-1);
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
		intel_agp_unmap_memory(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);

	return 0;
}

static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
{
	intel_private.driver->chipset_flush();
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {
		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

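/*
 * On 915 and later the chipset provides a dedicated "Intel Flush Page":
 * a single write to that page (see i9xx_chipset_flush()) makes the chipset
 * flush its write buffers. The IFPADDR config dword may already have been
 * set up by the BIOS; otherwise a page of bus address space is allocated
 * for it here.
 */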
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_chipset_flush(void)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}

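/*
 * Gen4/5 PTEs are still 32 bits wide but can address more than 4GB:
 * physical address bits 35:32 of the page go into PTE bits 7:4, which is
 * what the (addr >> 28) & 0xf0 below computes (compare the gen6 variant,
 * which packs address bits 39:32 into PTE bits 11:4).
 */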
static void i965_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
}

static bool gen6_check_flags(unsigned int flags)
{
	return true;
}

static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
	u32 pte_flags;

	if (type_mask == AGP_USER_UNCACHED_MEMORY)
		pte_flags = GEN6_PTE_UNCACHED;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
		pte_flags = GEN6_PTE_LLC;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	} else { /* set 'normal'/'cached' to LLC by default */
		pte_flags = GEN6_PTE_LLC_MLC;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	}

	/* gen6 has bit11-4 for physical addr bit39-32 */
	addr |= (addr >> 28) & 0xff0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}

static int i9xx_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	if (INTEL_GTT_GEN == 3) {
		u32 gtt_addr;

		pci_read_config_dword(intel_private.pcidev,
				      I915_PTEADDR, &gtt_addr);
		intel_private.gtt_bus_addr = gtt_addr;
	} else {
		u32 gtt_offset;

		switch (INTEL_GTT_GEN) {
		case 5:
		case 6:
			gtt_offset = MB(2);
			break;
		case 4:
		default:
			gtt_offset = KB(512);
			break;
		}
		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
	}

	intel_private.pte_bus_addr =
		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;

	intel_i9xx_setup_flush();

	return 0;
}

static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};

static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.chipset_flush		= intel_fake_agp_chipset_flush,
};

static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.setup = i830_setup,
	.write_entry = i830_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.setup = i9xx_setup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
	.gen = 6,
	.setup = i9xx_setup,
	.write_entry = gen6_write_entry,
	.check_flags = gen6_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};

/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * gmch_driver and gtt_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct agp_bridge_driver *gmch_driver;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}

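/*
 * Probe helper called from the agp driver: match the IGD PCI id against
 * intel_gtt_chipsets[], then pick the DMA mask from the PTE format the
 * chipset driver uses (40 bits for gen6, 36 bits for i965-class, 32 bits
 * otherwise).
 */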
int intel_gmch_probe(struct pci_dev *pdev,
				      struct agp_bridge_data *bridge)
{
	int i, mask;
	bridge->driver = NULL;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			bridge->driver =
				intel_gtt_chipsets[i].gmch_driver;
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!bridge->driver)
		return 0;

	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	intel_private.bridge_dev = pci_dev_get(pdev);

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	if (intel_private.driver->write_entry == gen6_write_entry)
		mask = 40;
	else if (intel_private.driver->write_entry == i965_write_entry)
		mask = 36;
	else
		mask = 32;

	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	if (bridge->driver == &intel_810_driver)
		return 1;

	if (intel_gtt_init() != 0)
		return 0;

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

struct intel_gtt *intel_gtt_get(void)
{
	return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);

void intel_gmch_remove(struct pci_dev *pdev)
{
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");
1551