intel-gtt.c revision 5cbecafce4ee8ab73c194911e01a77a7a07f034e
1/*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * an agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphic devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this does not make much sense anymore, just needlessly complicates
12 * the code. But as long as the old graphics stack is still supported, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18#include <linux/module.h>
19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/pagemap.h>
23#include <linux/agp_backend.h>
24#include <asm/smp.h>
25#include "agp.h"
26#include "intel-agp.h"
27#include <linux/intel-gtt.h>
28#include <drm/intel-gtt.h>
29
30/*
31 * If we have Intel graphics, we're not going to have anything other than
32 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
33 * on the Intel IOMMU support (CONFIG_DMAR).
34 * Only newer chipsets need to bother with this, of course.
35 */
36#ifdef CONFIG_DMAR
37#define USE_PCI_DMA_API 1
38#else
39#define USE_PCI_DMA_API 0
40#endif
41
42/* Max amount of stolen space, anything above will be returned to Linux */
43int intel_max_stolen = 32 * 1024 * 1024;
44EXPORT_SYMBOL(intel_max_stolen);
45
/* Fixed aperture configurations supported by the i810.
 * Entries are {size-in-MB, num GTT entries, order} — presumably matching
 * struct aper_size_info_fixed's field order in agp.h; verify there. */
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};
52
53#define AGP_DCACHE_MEMORY	1
54#define AGP_PHYS_MEMORY		2
55#define INTEL_AGP_CACHED_MEMORY 3
56
/* PTE bit masks for the i810-family AGP memory types.  Indexed by the
 * mask type returned from agp_type_to_mask_type(); index 1 is the
 * on-chip dcache (I810_PTE_LOCAL), index 3 the snooped/cached variant. */
static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};
65
66#define INTEL_AGP_UNCACHED_MEMORY              0
67#define INTEL_AGP_CACHED_MEMORY_LLC            1
68#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
69#define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
70#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
71
72static struct gatt_mask intel_gen6_masks[] =
73{
74	{.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
75	 .type = INTEL_AGP_UNCACHED_MEMORY },
76	{.mask = I810_PTE_VALID | GEN6_PTE_LLC,
77         .type = INTEL_AGP_CACHED_MEMORY_LLC },
78	{.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
79         .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
80	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
81         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
82	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
83         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
84};
85
/* Per-chipset-generation hooks and feature flags for the GTT code. */
struct intel_gtt_driver {
	unsigned int gen : 8;		/* chipset generation number */
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* Write one PTE: addr is the (dma) page address, entry the GTT index. */
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
};
99
/* Global driver state.  There is exactly one Intel GMCH per system, so a
 * single static instance is used throughout this file. */
static struct _intel_private {
	struct intel_gtt base;			/* info exported via drm/intel-gtt.h */
	const struct intel_gtt_driver *driver;	/* generation-specific hooks */
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;		/* host bridge (GMCH) device */
	u8 __iomem *registers;			/* mapped MMIO register window */
	phys_addr_t gtt_bus_addr;		/* bus address of the GTT itself */
	phys_addr_t gma_bus_addr;		/* bus address of the aperture */
	phys_addr_t pte_bus_addr;		/* value programmed into PGETBL_CTL */
	u32 __iomem *gtt;		/* I915G */
	int num_dcache_entries;			/* i810 on-chip dcache pages (0 if none) */
	union {
		void __iomem *i9xx_flush_page;	/* 9xx: mapped chipset-flush page */
		void *i8xx_flush_page;		/* 8xx: kmap of i8xx_page */
	};
	struct page *i8xx_page;			/* backing page for 8xx flush */
	struct resource ifp_resource;		/* isoch flush page resource (9xx) */
	int resource_valid;			/* ifp_resource claimed successfully */
	struct page *scratch_page;		/* backs all unbound GTT entries */
	dma_addr_t scratch_page_dma;		/* dma (or phys) addr of scratch page */
} intel_private;
121
122#define INTEL_GTT_GEN	intel_private.driver->gen
123#define IS_G33		intel_private.driver->is_g33
124#define IS_PINEVIEW	intel_private.driver->is_pineview
125#define IS_IRONLAKE	intel_private.driver->is_ironlake
126
127#if USE_PCI_DMA_API
128static void intel_agp_free_sglist(struct agp_memory *mem)
129{
130	struct sg_table st;
131
132	st.sgl = mem->sg_list;
133	st.orig_nents = st.nents = mem->page_count;
134
135	sg_free_table(&st);
136
137	mem->sg_list = NULL;
138	mem->num_sg = 0;
139}
140
/*
 * Build a scatterlist covering @mem's pages and DMA-map it for the GMCH.
 * On success mem->sg_list and mem->num_sg are populated and 0 is returned;
 * on any failure the partially built table is freed and -ENOMEM returned.
 */
static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		goto err;

	mem->sg_list = sg = st.sgl;

	/* one sg entry per page, no offset */
	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	/* num_sg may come back smaller than page_count if the IOMMU
	 * coalesced adjacent entries */
	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg))
		goto err;

	return 0;

err:
	sg_free_table(&st);
	return -ENOMEM;
}
168
169static void intel_agp_unmap_memory(struct agp_memory *mem)
170{
171	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
172
173	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
174		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
175	intel_agp_free_sglist(mem);
176}
177
/*
 * Write PTEs for an already-DMA-mapped agp_memory into the GTT, starting
 * at index @pg_start.  Handles both the 1:1 case (one sg entry per page)
 * and the coalesced case where the IOMMU merged contiguous pages.
 */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);	/* caller must have mapped the memory first */

	if (mem->num_sg == mem->page_count) {
		/* fast path: each sg entry covers exactly one page */
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
					intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
								       sg_dma_address(sg) + m * PAGE_SIZE,
								       mask_type),
				       intel_private.gtt+j);
				j++;
			}
		}
	}
	/* read back the last entry so the posted writes reach the chipset */
	readl(intel_private.gtt+j-1);
}
213
214#else
215
/*
 * Non-DMA-API variant: write one PTE per page using the pages' physical
 * addresses directly, starting at GTT index @pg_start.
 */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	int i, j;

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.gtt+j);
	}

	/* read back the last entry so the posted writes reach the chipset */
	readl(intel_private.gtt+j-1);
}
229
230#endif
231
232static int intel_i810_fetch_size(void)
233{
234	u32 smram_miscc;
235	struct aper_size_info_fixed *values;
236
237	pci_read_config_dword(intel_private.bridge_dev,
238			      I810_SMRAM_MISCC, &smram_miscc);
239	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
240
241	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
242		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
243		return 0;
244	}
245	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
246		agp_bridge->current_size = (void *) (values + 1);
247		agp_bridge->aperture_size_idx = 1;
248		return values[1].size;
249	} else {
250		agp_bridge->current_size = (void *) (values);
251		agp_bridge->aperture_size_idx = 0;
252		return values[0].size;
253	}
254
255	return 0;
256}
257
/*
 * Program the i810 for use as an "AGP" bridge: map the MMIO registers,
 * detect the on-chip dcache, record the aperture base, enable the GATT
 * and point every PTE at the scratch page.
 * Returns 0 on success, -ENOMEM if the registers cannot be mapped.
 */
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		/* MMADDR low bits are control/reserved; keep the base only */
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	/* enable the page table at the address allocated by the agp core */
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* point all entries at the scratch page so stray accesses
		 * don't hit random memory */
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}
299
/* Disable the i810 page table and unmap the MMIO register window. */
static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}
306
307static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
308{
309	return;
310}
311
312/* Exists to support ARGB cursors */
/* Allocate a physically contiguous 4-page (order-2) block below 4G and
 * mark it uncached; used for the ARGB hardware cursor.  Takes an extra
 * page reference (dropped again in i8xx_destroy_pages).
 * Returns NULL on allocation or attribute-change failure. */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		/* roll back to write-back before freeing */
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
330
331static void i8xx_destroy_pages(struct page *page)
332{
333	if (page == NULL)
334		return;
335
336	set_pages_wb(page, 4);
337	put_page(page);
338	__free_pages(page, 2);
339	atomic_dec(&agp_bridge->current_memory_agp);
340}
341
342static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
343					int type)
344{
345	if (type < AGP_USER_TYPES)
346		return type;
347	else if (type == AGP_USER_CACHED_MEMORY)
348		return INTEL_AGP_CACHED_MEMORY;
349	else
350		return 0;
351}
352
353static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
354					int type)
355{
356	unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
357	unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
358
359	if (type_mask == AGP_USER_UNCACHED_MEMORY)
360		return INTEL_AGP_UNCACHED_MEMORY;
361	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
362		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
363			      INTEL_AGP_CACHED_MEMORY_LLC_MLC;
364	else /* set 'normal'/'cached' to LLC by default */
365		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
366			      INTEL_AGP_CACHED_MEMORY_LLC;
367}
368
369
/*
 * Bind @mem into the i810 GTT starting at @pg_start.
 *
 * Checks that the range fits, that every target PTE is currently empty,
 * and that @type matches, then writes the PTEs for either the on-chip
 * dcache (addresses are synthesized as i*4096) or regular/physical
 * memory.  Returns 0 on success, -EBUSY if an entry is occupied,
 * -EINVAL on any other validation failure.
 *
 * Note the control flow: the empty-page_count case jumps to "out" which
 * deliberately falls through ret = 0, i.e. it succeeds trivially.
 */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;


	/* refuse to overwrite already-bound entries */
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		/* dcache entries map the on-chip video ram linearly */
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
431
432static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
433				int type)
434{
435	int i;
436
437	if (mem->page_count == 0)
438		return 0;
439
440	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
441		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
442	}
443	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
444
445	return 0;
446}
447
448/*
449 * The i810/i830 requires a physical address to program its mouse
450 * pointer into hardware.
451 * However the Xserver still writes to it through the agp aperture.
452 */
453static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
454{
455	struct agp_memory *new;
456	struct page *page;
457
458	switch (pg_count) {
459	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
460		break;
461	case 4:
462		/* kludge to get 4 physical pages for ARGB cursor */
463		page = i8xx_alloc_pages();
464		break;
465	default:
466		return NULL;
467	}
468
469	if (page == NULL)
470		return NULL;
471
472	new = agp_create_memory(pg_count);
473	if (new == NULL)
474		return NULL;
475
476	new->pages[0] = page;
477	if (pg_count == 4) {
478		/* kludge to get 4 physical pages for ARGB cursor */
479		new->pages[1] = new->pages[0] + 1;
480		new->pages[2] = new->pages[1] + 1;
481		new->pages[3] = new->pages[2] + 1;
482	}
483	new->page_count = pg_count;
484	new->num_scratch_pages = pg_count;
485	new->type = AGP_PHYS_MEMORY;
486	new->physical = page_to_phys(new->pages[0]);
487	return new;
488}
489
490static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
491{
492	struct agp_memory *new;
493
494	if (type == AGP_DCACHE_MEMORY) {
495		if (pg_count != intel_private.num_dcache_entries)
496			return NULL;
497
498		new = agp_create_memory(1);
499		if (new == NULL)
500			return NULL;
501
502		new->type = AGP_DCACHE_MEMORY;
503		new->page_count = pg_count;
504		new->num_scratch_pages = 0;
505		agp_free_page_array(new);
506		return new;
507	}
508	if (type == AGP_PHYS_MEMORY)
509		return alloc_agpphysmem_i8xx(pg_count, type);
510	return NULL;
511}
512
/*
 * Free memory allocated by intel_i810_alloc_by_type().  Only
 * AGP_PHYS_MEMORY owns real pages; dcache memory has no backing pages
 * (its page array was already dropped at alloc time), so it only needs
 * the key released and the struct freed.
 */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
529
530static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
531					    dma_addr_t addr, int type)
532{
533	/* Type checking must be done elsewhere */
534	return addr | bridge->driver->masks[type].mask;
535}
536
537static int intel_gtt_setup_scratch_page(void)
538{
539	struct page *page;
540	dma_addr_t dma_addr;
541
542	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
543	if (page == NULL)
544		return -ENOMEM;
545	get_page(page);
546	set_pages_uc(page, 1);
547
548	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
549		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
550				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
551		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
552			return -EINVAL;
553
554		intel_private.scratch_page_dma = dma_addr;
555	} else
556		intel_private.scratch_page_dma = page_to_phys(page);
557
558	intel_private.scratch_page = page;
559
560	return 0;
561}
562
563static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
564	{128, 32768, 5},
565	/* The 64M mode still requires a 128k gatt */
566	{64, 16384, 5},
567	{256, 65536, 6},
568	{512, 131072, 7},
569};
570
/*
 * Work out how many GTT entries are consumed by BIOS-stolen memory (plus
 * chipset overhead such as the GTT itself and the BIOS popup), decoded
 * from the generation-specific GMCH control bits.
 *
 * Returns stolen_size/4K minus the overhead entries.
 *
 * NOTE(review): stolen_size (unsigned) is compared against
 * intel_max_stolen (signed int) — fine for the current positive value,
 * but worth confirming if intel_max_stolen ever becomes configurable.
 * NOTE(review): if stolen_size/KB(4) < overhead_entries the subtraction
 * at the end underflows to a huge unsigned value — presumably this
 * cannot happen on real hardware; verify against the callers.
 */
static unsigned int intel_gtt_stolen_entries(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int overhead_entries, stolen_entries;
	unsigned int stolen_size = 0;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	/* older chipsets keep the GTT inside stolen memory: account for it */
	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
		overhead_entries = 0;
	else
		overhead_entries = intel_private.base.gtt_mappable_entries
			/ 1024;

	overhead_entries += 1; /* BIOS popup */

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		/* i830/845G encoding, including dedicated ("local") RDRAM */
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else if (INTEL_GTT_GEN == 6) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			stolen_size = MB(192);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			stolen_size = MB(288);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			stolen_size = MB(320);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			stolen_size = MB(384);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			stolen_size = MB(416);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			stolen_size = MB(448);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			stolen_size = MB(480);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			stolen_size = MB(512);
			break;
		}
	} else {
		/* i855 through Ironlake share the I855_GMCH_GMS encoding */
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	/* local (dedicated) memory is never trimmed */
	if (!local && stolen_size > intel_max_stolen) {
		dev_info(&intel_private.bridge_dev->dev,
			 "detected %dK stolen memory, trimming to %dK\n",
			 stolen_size / KB(1), intel_max_stolen / KB(1));
		stolen_size = intel_max_stolen;
	} else if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
		       stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	stolen_entries = stolen_size/KB(4) - overhead_entries;

	return stolen_entries;
}
734
/*
 * Total number of PTEs in the GTT.  On g33/gen4/gen5 the GTT size is
 * reported in PGETBL_CTL, on gen6 in the SNB GMCH control word; older
 * parts size the GTT exactly to cover the aperture.  Each PTE is 4
 * bytes, hence the size/4 conversions.
 */
static unsigned int intel_gtt_total_entries(void)
{
	int size;

	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = KB(128);
			break;
		case I965_PGETBL_SIZE_256KB:
			size = KB(256);
			break;
		case I965_PGETBL_SIZE_512KB:
			size = KB(512);
			break;
		case I965_PGETBL_SIZE_1MB:
			size = KB(1024);
			break;
		case I965_PGETBL_SIZE_2MB:
			size = KB(2048);
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = KB(1024 + 512);
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = KB(512);
		}

		return size/4;
	} else if (INTEL_GTT_GEN == 6) {
		u16 snb_gmch_ctl;

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			size = MB(2);
			break;
		}
		return size/4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.base.gtt_mappable_entries;
	}
}
794
795static unsigned int intel_gtt_mappable_entries(void)
796{
797	unsigned int aperture_size;
798
799	if (INTEL_GTT_GEN == 2) {
800		u16 gmch_ctrl;
801
802		pci_read_config_word(intel_private.bridge_dev,
803				     I830_GMCH_CTRL, &gmch_ctrl);
804
805		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
806			aperture_size = MB(64);
807		else
808			aperture_size = MB(128);
809	} else {
810		/* 9xx supports large sizes, just look at the length */
811		aperture_size = pci_resource_len(intel_private.pcidev, 2);
812	}
813
814	return aperture_size >> PAGE_SHIFT;
815}
816
/* Undo intel_gtt_setup_scratch_page(): restore caching, unmap and free.
 * NOTE(review): pci_unmap_page() is called unconditionally here, while
 * setup only pci_map_page()s when USE_PCI_DMA_API && gen > 2 — on older
 * gens this unmaps a bare physical address; confirm this is benign on
 * all supported configurations. */
static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(intel_private.scratch_page);
	__free_page(intel_private.scratch_page);
}
825
/* Full teardown of the GTT state set up by intel_gtt_init(): flush page
 * mapping, isoch flush resource, GTT and register mappings, and finally
 * the scratch page. */
static void intel_gtt_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}
839
/*
 * Common GTT initialization: run the chipset-specific setup hook, size
 * the GTT, map it, account the BIOS-stolen entries and allocate the
 * scratch page.  Returns 0 on success or a negative errno, unwinding
 * whatever was already set up.
 */
static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.base.gtt_total_entries = intel_gtt_total_entries();

	/* 4 bytes per PTE */
	gtt_map_size = intel_private.base.gtt_total_entries * 4;

	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
				    gtt_map_size);
	if (!intel_private.gtt) {
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	global_cache_flush();   /* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
	if (intel_private.base.gtt_stolen_entries == 0) {
		iounmap(intel_private.registers);
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	return 0;
}
879
880static int intel_fake_agp_fetch_size(void)
881{
882	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
883	unsigned int aper_size;
884	int i;
885
886	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
887		    / MB(1);
888
889	for (i = 0; i < num_sizes; i++) {
890		if (aper_size == intel_fake_agp_sizes[i].size) {
891			agp_bridge->current_size =
892				(void *) (intel_fake_agp_sizes + i);
893			return aper_size;
894		}
895	}
896
897	return 0;
898}
899
/* Tear down the i8xx chipset-flush page: unmap the kernel mapping,
 * restore GART page attributes, then free the page. */
static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}
909
910static void intel_i830_setup_flush(void)
911{
912	/* return if we've already set the flush mechanism up */
913	if (intel_private.i8xx_page)
914		return;
915
916	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
917	if (!intel_private.i8xx_page)
918		return;
919
920	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
921	if (!intel_private.i8xx_flush_page)
922		intel_i830_fini_flush();
923}
924
925/* The chipset_flush interface needs to get data that has already been
926 * flushed out of the CPU all the way out to main memory, because the GPU
927 * doesn't snoop those buffers.
928 *
929 * The 8xx series doesn't have the same lovely interface for flushing the
930 * chipset write buffers that the later chips do. According to the 865
931 * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
932 * that buffer out, we just fill 1KB and clflush it out, on the assumption
933 * that it'll push whatever was in there out.  It appears to work.
934 */
/* Flush the i8xx chipset write buffers by filling and clflushing a 1KB
 * dummy buffer (see the comment block above for the rationale); falls
 * back to a full wbinvd on CPUs without clflush. */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
946
947static void i830_write_entry(dma_addr_t addr, unsigned int entry,
948			     unsigned int flags)
949{
950	u32 pte_flags = I810_PTE_VALID;
951
952	switch (flags) {
953	case AGP_DCACHE_MEMORY:
954		pte_flags |= I810_PTE_LOCAL;
955		break;
956	case AGP_USER_CACHED_MEMORY:
957		pte_flags |= I830_PTE_SYSTEM_CACHED;
958		break;
959	}
960
961	writel(addr | pte_flags, intel_private.gtt + entry);
962}
963
/* Record the aperture base, enable the GMCH and turn the page table on
 * by writing its bus address (with the enable bit) into PGETBL_CTL. */
static void intel_enable_gtt(void)
{
	u32 gma_addr;
	u16 gmch_ctrl;

	/* the aperture BAR moved between gen2 and later parts */
	if (INTEL_GTT_GEN == 2)
		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
	else
		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
				      &gma_addr);

	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
}
986
/*
 * i830 setup hook: map the MMIO registers and record the bus addresses
 * of the GTT (which lives at a fixed offset inside the register window)
 * and of the page table as programmed by the BIOS.
 * Returns 0 on success, -ENOMEM if ioremap fails.
 */
static int i830_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
	/* low MMADDR bits are control/reserved; keep the base only */
	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
	intel_private.pte_bus_addr =
		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;

	intel_i830_setup_flush();

	return 0;
}
1006
1007static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
1008{
1009	agp_bridge->gatt_table_real = NULL;
1010	agp_bridge->gatt_table = NULL;
1011	agp_bridge->gatt_bus_addr = 0;
1012
1013	return 0;
1014}
1015
/* Nothing to free: the fake bridge never allocated a gatt table. */
static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
1020
/*
 * Configure hook for the fake agp bridge: enable the GTT and point
 * every non-stolen entry at the scratch page.  Always returns 0.
 */
static int intel_fake_agp_configure(void)
{
	int i;

	intel_enable_gtt();

	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	/* stolen entries belong to the BIOS/firmware; clear only the rest */
	for (i = intel_private.base.gtt_stolen_entries;
			i < intel_private.base.gtt_total_entries; i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);	/* PCI Posting. */

	global_cache_flush();

	return 0;
}
1040
1041static bool i830_check_flags(unsigned int flags)
1042{
1043	switch (flags) {
1044	case 0:
1045	case AGP_PHYS_MEMORY:
1046	case AGP_USER_CACHED_MEMORY:
1047	case AGP_USER_MEMORY:
1048		return true;
1049	}
1050
1051	return false;
1052}
1053
/*
 * Bind @mem into the GTT at @pg_start via the chipset write_entry hook.
 * Rejects attempts to touch the BIOS-stolen range, ranges past the end
 * of the GTT, mismatched types and flags the chipset doesn't support.
 * Returns 0 on success, -EINVAL otherwise.
 *
 * Note: as in intel_i810_insert_entries, a zero page_count jumps to
 * "out" and deliberately falls through ret = 0 (trivial success).
 */
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i, j;
	int ret = -EINVAL;

	if (mem->page_count == 0)
		goto out;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
			   pg_start, intel_private.base.gtt_stolen_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		intel_private.driver->write_entry(page_to_phys(mem->pages[i]),
						  j, type);
	}
	/* read back the last PTE to flush the posted writes */
	readl(intel_private.gtt+j-1);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
1097
1098static int intel_fake_agp_remove_entries(struct agp_memory *mem,
1099					 off_t pg_start, int type)
1100{
1101	int i;
1102
1103	if (mem->page_count == 0)
1104		return 0;
1105
1106	if (pg_start < intel_private.base.gtt_stolen_entries) {
1107		dev_info(&intel_private.pcidev->dev,
1108			 "trying to disable local/stolen memory\n");
1109		return -EINVAL;
1110	}
1111
1112	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1113		intel_private.driver->write_entry(intel_private.scratch_page_dma,
1114						  i, 0);
1115	}
1116	readl(intel_private.gtt+i-1);
1117
1118	return 0;
1119}
1120
1121static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1122						       int type)
1123{
1124	if (type == AGP_PHYS_MEMORY)
1125		return alloc_agpphysmem_i8xx(pg_count, type);
1126	/* always return NULL for other allocation types for now */
1127	return NULL;
1128}
1129
1130static int intel_alloc_chipset_flush_resource(void)
1131{
1132	int ret;
1133	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1134				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1135				     pcibios_align_resource, intel_private.bridge_dev);
1136
1137	return ret;
1138}
1139
1140static void intel_i915_setup_chipset_flush(void)
1141{
1142	int ret;
1143	u32 temp;
1144
1145	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1146	if (!(temp & 0x1)) {
1147		intel_alloc_chipset_flush_resource();
1148		intel_private.resource_valid = 1;
1149		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1150	} else {
1151		temp &= ~1;
1152
1153		intel_private.resource_valid = 1;
1154		intel_private.ifp_resource.start = temp;
1155		intel_private.ifp_resource.end = temp + PAGE_SIZE;
1156		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1157		/* some BIOSes reserve this area in a pnp some don't */
1158		if (ret)
1159			intel_private.resource_valid = 0;
1160	}
1161}
1162
1163static void intel_i965_g33_setup_chipset_flush(void)
1164{
1165	u32 temp_hi, temp_lo;
1166	int ret;
1167
1168	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1169	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1170
1171	if (!(temp_lo & 0x1)) {
1172
1173		intel_alloc_chipset_flush_resource();
1174
1175		intel_private.resource_valid = 1;
1176		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1177			upper_32_bits(intel_private.ifp_resource.start));
1178		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1179	} else {
1180		u64 l64;
1181
1182		temp_lo &= ~0x1;
1183		l64 = ((u64)temp_hi << 32) | temp_lo;
1184
1185		intel_private.resource_valid = 1;
1186		intel_private.ifp_resource.start = l64;
1187		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1188		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1189		/* some BIOSes reserve this area in a pnp some don't */
1190		if (ret)
1191			intel_private.resource_valid = 0;
1192	}
1193}
1194
1195static void intel_i9xx_setup_flush(void)
1196{
1197	/* return if already configured */
1198	if (intel_private.ifp_resource.start)
1199		return;
1200
1201	if (INTEL_GTT_GEN == 6)
1202		return;
1203
1204	/* setup a resource for this object */
1205	intel_private.ifp_resource.name = "Intel Flush Page";
1206	intel_private.ifp_resource.flags = IORESOURCE_MEM;
1207
1208	/* Setup chipset flush for 915 */
1209	if (IS_G33 || INTEL_GTT_GEN >= 4) {
1210		intel_i965_g33_setup_chipset_flush();
1211	} else {
1212		intel_i915_setup_chipset_flush();
1213	}
1214
1215	if (intel_private.ifp_resource.start)
1216		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1217	if (!intel_private.i9xx_flush_page)
1218		dev_err(&intel_private.pcidev->dev,
1219			"can't ioremap flush page - no chipset flushing\n");
1220}
1221
1222static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1223{
1224	if (intel_private.i9xx_flush_page)
1225		writel(1, intel_private.i9xx_flush_page);
1226}
1227
/*
 * Bind the pages of @mem into the GTT starting at entry @pg_start.
 * Returns 0 on success (including the empty-allocation no-op) and
 * -EINVAL on any validation failure.
 */
static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	/* An empty allocation is trivially "inserted" - report success. */
	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	/* Entries below gtt_stolen_entries back BIOS-stolen memory and
	 * must not be rebound. */
	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
			   pg_start, intel_private.base.gtt_stolen_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	/* The requested range must fit inside the aperture. */
	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	/* gen6 accepts any mask type; earlier chips only this fixed set. */
	if (INTEL_GTT_GEN != 6 && mask_type != 0 &&
	    mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	intel_agp_insert_sg_entries(mem, pg_start, mask_type);

 out:
	ret = 0;
 out_err:
	/* Deliberate fallthrough: the success path also marks the memory
	 * flushed; only the return value differs between the two labels. */
	mem->is_flushed = true;
	return ret;
}
1280
1281static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1282				     int type)
1283{
1284	int i;
1285
1286	if (mem->page_count == 0)
1287		return 0;
1288
1289	if (pg_start < intel_private.base.gtt_stolen_entries) {
1290		dev_info(&intel_private.pcidev->dev,
1291			 "trying to disable local/stolen memory\n");
1292		return -EINVAL;
1293	}
1294
1295	for (i = pg_start; i < (mem->page_count + pg_start); i++)
1296		writel(agp_bridge->scratch_page, intel_private.gtt+i);
1297
1298	readl(intel_private.gtt+i-1);
1299
1300	return 0;
1301}
1302
1303static void i965_write_entry(dma_addr_t addr, unsigned int entry,
1304			     unsigned int flags)
1305{
1306	/* Shift high bits down */
1307	addr |= (addr >> 28) & 0xf0;
1308	writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
1309}
1310
1311static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1312			     unsigned int flags)
1313{
1314	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1315	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1316	u32 pte_flags;
1317
1318	if (type_mask == AGP_USER_UNCACHED_MEMORY)
1319		pte_flags = GEN6_PTE_UNCACHED;
1320	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1321		pte_flags = GEN6_PTE_LLC;
1322		if (gfdt)
1323			pte_flags |= GEN6_PTE_GFDT;
1324	} else { /* set 'normal'/'cached' to LLC by default */
1325		pte_flags = GEN6_PTE_LLC_MLC;
1326		if (gfdt)
1327			pte_flags |= GEN6_PTE_GFDT;
1328	}
1329
1330	/* gen6 has bit11-4 for physical addr bit39-32 */
1331	addr |= (addr >> 28) & 0xff0;
1332	writel(addr | pte_flags, intel_private.gtt + entry);
1333}
1334
1335static int i9xx_setup(void)
1336{
1337	u32 reg_addr;
1338
1339	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
1340
1341	reg_addr &= 0xfff80000;
1342
1343	intel_private.registers = ioremap(reg_addr, 128 * 4096);
1344	if (!intel_private.registers)
1345		return -ENOMEM;
1346
1347	if (INTEL_GTT_GEN == 3) {
1348		u32 gtt_addr;
1349
1350		pci_read_config_dword(intel_private.pcidev,
1351				      I915_PTEADDR, &gtt_addr);
1352		intel_private.gtt_bus_addr = gtt_addr;
1353	} else {
1354		u32 gtt_offset;
1355
1356		switch (INTEL_GTT_GEN) {
1357		case 5:
1358		case 6:
1359			gtt_offset = MB(2);
1360			break;
1361		case 4:
1362		default:
1363			gtt_offset =  KB(512);
1364			break;
1365		}
1366		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1367	}
1368
1369	intel_private.pte_bus_addr =
1370		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1371
1372	intel_i9xx_setup_flush();
1373
1374	return 0;
1375}
1376
1377/*
1378 * The i965 supports 36-bit physical addresses, but to keep
1379 * the format of the GTT the same, the bits that don't fit
1380 * in a 32-bit word are shifted down to bits 4..7.
1381 *
1382 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1383 * is always zero on 32-bit architectures, so no need to make
1384 * this conditional.
1385 */
1386static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1387					    dma_addr_t addr, int type)
1388{
1389	/* Shift high bits down */
1390	addr |= (addr >> 28) & 0xf0;
1391
1392	/* Type checking must be done elsewhere */
1393	return addr | bridge->driver->masks[type].mask;
1394}
1395
1396static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
1397					    dma_addr_t addr, int type)
1398{
1399	/* gen6 has bit11-4 for physical addr bit39-32 */
1400	addr |= (addr >> 28) & 0xff0;
1401
1402	/* Type checking must be done elsewhere */
1403	return addr | bridge->driver->masks[type].mask;
1404}
1405
/* i810/i815 bridge driver: the only variant here that still uses the
 * generic GATT-table code and real fixed aperture sizes. */
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};
1431
/* i8xx (830M-865) bridge driver: routed through the common
 * intel_fake_agp_* entry points and the i830 chipset flush. */
static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};
1457
/* i915-class bridge driver: still on the legacy intel_i915_* insert/
 * remove paths; gains PCI DMA API map/unmap hooks when available. */
static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#if USE_PCI_DMA_API
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1487
/* i965/g4x/ironlake bridge driver: like intel_915_driver but with the
 * 36-bit i965 address masking. */
static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#if USE_PCI_DMA_API
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1517
/* Sandybridge (gen6) bridge driver: 40-bit address masking and its own
 * mask/type tables. */
static const struct agp_bridge_driver intel_gen6_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_gen6_mask_memory,
	.masks			= intel_gen6_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_gen6_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#if USE_PCI_DMA_API
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1547
/* G33/Pineview bridge driver: i965-style address masking on a gen3
 * chipset. */
static const struct agp_bridge_driver intel_g33_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#if USE_PCI_DMA_API
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1577
/* Per-generation low-level GTT drivers: setup and PTE write routines. */
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.setup = i830_setup,
	.write_entry = i830_write_entry,
	.check_flags = i830_check_flags,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.setup = i9xx_setup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
	.gen = 6,
	.setup = i9xx_setup,
	.write_entry = gen6_write_entry,
};
1623
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * gmch_driver and gtt_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct agp_bridge_driver *gmch_driver;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver , NULL},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver , NULL},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver , NULL},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver , NULL},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&intel_830_driver , &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
		&intel_830_driver , &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&intel_830_driver , &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&intel_830_driver , &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&intel_830_driver , &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&intel_915_driver , &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&intel_915_driver , &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&intel_915_driver , &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&intel_915_driver , &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&intel_915_driver , &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&intel_915_driver , &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&intel_i965_driver , &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&intel_i965_driver , &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&intel_i965_driver , &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&intel_i965_driver , &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&intel_i965_driver , &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&intel_i965_driver , &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&intel_g33_driver , &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&intel_g33_driver , &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&intel_g33_driver , &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&intel_g33_driver , &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&intel_g33_driver , &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&intel_i965_driver , &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&intel_i965_driver , &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&intel_i965_driver , &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&intel_i965_driver , &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&intel_i965_driver , &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&intel_i965_driver , &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&intel_i965_driver , &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &intel_i965_driver , &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &intel_i965_driver , &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
	    "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
	    "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
	    "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
	    "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
	    "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
	    "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
	    "Sandybridge", &intel_gen6_driver , &sandybridge_gtt_driver },
	/* Sentinel: NULL name terminates the probe loop. */
	{ 0, NULL, NULL }
};
1716
1717static int find_gmch(u16 device)
1718{
1719	struct pci_dev *gmch_device;
1720
1721	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1722	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1723		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1724					     device, gmch_device);
1725	}
1726
1727	if (!gmch_device)
1728		return 0;
1729
1730	intel_private.pcidev = gmch_device;
1731	return 1;
1732}
1733
1734int intel_gmch_probe(struct pci_dev *pdev,
1735				      struct agp_bridge_data *bridge)
1736{
1737	int i, mask;
1738	bridge->driver = NULL;
1739
1740	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1741		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1742			bridge->driver =
1743				intel_gtt_chipsets[i].gmch_driver;
1744			intel_private.driver =
1745				intel_gtt_chipsets[i].gtt_driver;
1746			break;
1747		}
1748	}
1749
1750	if (!bridge->driver)
1751		return 0;
1752
1753	bridge->dev_private_data = &intel_private;
1754	bridge->dev = pdev;
1755
1756	intel_private.bridge_dev = pci_dev_get(pdev);
1757
1758	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1759
1760	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
1761		mask = 40;
1762	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
1763		mask = 36;
1764	else
1765		mask = 32;
1766
1767	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1768		dev_err(&intel_private.pcidev->dev,
1769			"set gfx device dma mask %d-bit failed!\n", mask);
1770	else
1771		pci_set_consistent_dma_mask(intel_private.pcidev,
1772					    DMA_BIT_MASK(mask));
1773
1774	if (bridge->driver == &intel_810_driver)
1775		return 1;
1776
1777	if (intel_gtt_init() != 0)
1778		return 0;
1779
1780	return 1;
1781}
1782EXPORT_SYMBOL(intel_gmch_probe);
1783
/* Export the probed gtt configuration to users of this module. */
struct intel_gtt *intel_gtt_get(void)
{
	return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);
1789
1790void intel_gmch_remove(struct pci_dev *pdev)
1791{
1792	if (intel_private.pcidev)
1793		pci_dev_put(intel_private.pcidev);
1794	if (intel_private.bridge_dev)
1795		pci_dev_put(intel_private.bridge_dev);
1796}
1797EXPORT_SYMBOL(intel_gmch_remove);
1798
1799MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
1800MODULE_LICENSE("GPL and additional rights");
1801