/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


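/*
 * Grab the first free key from the bridge's key bitmap and mark it as
 * used. Returns -1 when all MAXKEY keys are taken.
 */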
static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = NULL;

	if (size <= 2*PAGE_SIZE)
		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (mem->pages == NULL) {
		mem->pages = vmalloc(size);
	}
}
EXPORT_SYMBOL(agp_alloc_page_array);

void agp_free_page_array(struct agp_memory *mem)
{
	if (is_vmalloc_addr(mem->pages)) {
		vfree(mem->pages);
	} else {
		kfree(mem->pages);
	}
}
EXPORT_SYMBOL(agp_free_page_array);


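/*
 * Allocate an agp_memory descriptor plus a page array large enough for
 * num_agp_pages entries. The INT_MAX check guards against the
 * num_agp_pages * sizeof(struct page *) size calculation overflowing.
 */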
static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
		return NULL;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

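/*
 * Allocate an agp_memory descriptor of type AGP_NORMAL_MEMORY with a
 * page array sized for scratch_pages entries.
 */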
struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 *	agp_free_memory - free memory associated with an agp_memory pointer.
 *
 *	@curr:		agp_memory pointer to be freed.
 *
 *	It is the only function that can be called when the backend is not owned
 *	by the caller.  (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))

/**
 *	agp_allocate_memory  -  allocate a group of pages of a certain type.
 *
 *	@bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 *	@page_count:	size_t argument of the number of pages
 *	@type:	u32 argument of the type of memory to be allocated.
 *
 *	Every AGP bridge device will allow you to allocate AGP_NORMAL_MEMORY, which
 *	maps to physical RAM.  Any other type is device-dependent.
 *
 *	It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
					size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;
	int cur_memory;

	if (!bridge)
		return NULL;

	cur_memory = atomic_read(&bridge->current_memory_agp);
	if ((cur_memory + page_count > bridge->max_memory_agp) ||
	    (cur_memory + page_count < page_count))
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);


/* End - Generic routines for handling agp_memory structures */


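/*
 * Current aperture size in megabytes, less whatever has been set aside
 * via agp_memory_reserved.
 */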
static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


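/*
 * Number of usable GATT entries for the current aperture size, excluding
 * the entries covering agp_memory_reserved.
 */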
int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);


/**
 *	agp_copy_info  -  copy bridge state information
 *
 *	@bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 *	@info:		agp_kern_info pointer.  The caller should ensure that this pointer is valid.
 *
 *	This function copies information about the agp bridge device and the state of
 *	the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 *	agp_bind_memory  -  Bind an agp_memory structure into the GATT.
 *
 *	@curr:		agp_memory pointer
 *	@pg_start:	an offset into the graphics aperture translation table
 *
 *	It returns -EINVAL if the pointer == NULL.
 *	It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 *	agp_unbind_memory  -  Removes an agp_memory structure from the GATT
 *
 * @curr:	agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL.
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);


/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
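
/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */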
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *   The RATE field indicates the data transfer rates supported by this
	 *   device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
		case 0:
			printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
			*requested_mode |= AGPSTAT2_1X;
			break;
		case 1:
		case 2:
			break;
		case 3:
			*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
			break;
		case 4:
			break;
		case 5:
		case 6:
		case 7:
			*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4 */
			break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. The bridge is in 3.0
		 * mode, and we have been passed a 3.0 mode, but with 2.x speed
		 * bits set. AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
						current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
					current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If we didn't specify an AGP mode, we check whether both
		 * the graphics card and the bridge can do x8, and use it if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stats
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * OK, here we have an AGP device. Disable impossible
	 * settings, and adjust the read queue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
		 (vga_agpstat & AGPSTAT_FW) &&
		 (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


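/*
 * Write the negotiated status word into the AGP command register of every
 * PCI device that exposes an AGP capability, logging the data rate each
 * device is put into.
 */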
void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


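/*
 * Generic agp_enable() implementation: read the bridge's AGP status
 * register, negotiate a mode via agp_collect_device_status(), then
 * program the bridge and all AGP devices, handling both AGP 3.x and 2.x.
 */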
void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);


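/*
 * Allocate and map the GATT for bridges with a simple one-level table.
 * If the allocation fails, progressively smaller aperture sizes are
 * tried. The table pages are reserved and mapped uncached, and every
 * entry is initialised to point at the bridge's scratch page.
 */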
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2-level GATTs */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order = A_SIZE_8(temp)->page_order;
				num_entries = A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
				/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	if (set_memory_uc((unsigned long)table, 1 << page_order))
		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");

	bridge->gatt_table = (u32 __iomem *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2-level GATTs */
		return -EINVAL;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


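/*
 * Write the (masked) physical addresses of mem->pages into the GATT
 * starting at entry pg_start. Fails with -EINVAL if the range does not
 * fit in the aperture and with -EBUSY if any target entry is already
 * in use.
 */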
int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2-level GATTs */
		return -EINVAL;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


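/*
 * Point the GATT entries previously used by mem back at the scratch
 * page and flush the GART TLB. The bounds and memory-type checks mirror
 * agp_generic_insert_memory().
 */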
int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type, num_entries;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	num_entries = agp_num_entries();
	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the
 * allocated memory.  They also handle incrementing the current_memory_agp
 * value, which is checked against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page *page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */

/**
 * agp_enable  -  initialise the agp point-to-point connection.
 *
 * @bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode:	agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

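/*
 * Turn a DMA address into a GATT entry by OR-ing in the driver's first
 * mask; the memory type argument is ignored by the generic routine.
 */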
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

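/*
 * Flush the GART TLB by clearing the GTLBEN bit in the bridge's AGPCTRL
 * register and then restoring the previous value.
 */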
void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;

	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
						    AGP_APERTURE_BAR);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;

	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
