intel-gtt.c revision 0af9e92e779602bdd6d4d19acf63b4802fab91b6
/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <linux/intel-gtt.h>
#include <drm/intel-gtt.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;

static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};

#define AGP_DCACHE_MEMORY	1
#define AGP_PHYS_MEMORY		2
#define INTEL_AGP_CACHED_MEMORY	3

static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};

#define INTEL_AGP_UNCACHED_MEMORY		0
#define INTEL_AGP_CACHED_MEMORY_LLC		1
#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT	2
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC		3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT	4

struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types!
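	 * (gen6 decodes these agp memory type flags itself; see
	 * gen6_write_entry below.)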
	 */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};

static struct _intel_private {
	struct intel_gtt base;
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_bus_addr;
	phys_addr_t gma_bus_addr;
	phys_addr_t pte_bus_addr;
	u32 __iomem *gtt;	/* I915G */
	int num_dcache_entries;
	union {
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	dma_addr_t scratch_page_dma;
} intel_private;

#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake

static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}

static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	if (mem->sg_list)
		return 0; /* already mapped (e.g. for resume) */

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		goto err;

	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg))
		goto err;

	return 0;

err:
	sg_free_table(&st);
	return -ENOMEM;
}

static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}

static int intel_i810_fetch_size(void)
{
	u32 smram_miscc;
	struct aper_size_info_fixed *values;

	pci_read_config_dword(intel_private.bridge_dev,
			      I810_SMRAM_MISCC, &smram_miscc);
	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
		return 0;
	}
	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	} else {
		agp_bridge->current_size = (void *) (values);
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}

	return 0;
}

static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) ==
		I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
	}
	global_cache_flush();
	return 0;
}

static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}

static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;


	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;
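
	/* Unbinding rewrites each PTE to point at the bridge's scratch page
	 * rather than clearing it, so the GPU never walks a stale entry. */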
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}

static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
			return -EINVAL;

		intel_private.scratch_page_dma = dma_addr;
	} else
		intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}

static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

static unsigned int intel_gtt_stolen_entries(void)
{
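	/* "Stolen" memory is system RAM the BIOS reserved for the IGD at
	 * boot. The GMCH control word encodes its size; the result is
	 * returned as a count of 4K GTT entries, minus the overhead. */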
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int overhead_entries, stolen_entries;
	unsigned int stolen_size = 0;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
		overhead_entries = 0;
	else
		overhead_entries = intel_private.base.gtt_mappable_entries
			/ 1024;

	overhead_entries += 1; /* BIOS popup */

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else if (INTEL_GTT_GEN == 6) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			stolen_size = MB(192);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			stolen_size = MB(288);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			stolen_size = MB(320);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			stolen_size = MB(384);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			stolen_size = MB(416);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			stolen_size = MB(448);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			stolen_size = MB(480);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			stolen_size = MB(512);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (!local && stolen_size > intel_max_stolen) {
		dev_info(&intel_private.bridge_dev->dev,
			 "detected %dK stolen memory, trimming to %dK\n",
			 stolen_size / KB(1), intel_max_stolen / KB(1));
		stolen_size = intel_max_stolen;
	} else if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
			 stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
			 "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	stolen_entries = stolen_size/KB(4) - overhead_entries;

	return stolen_entries;
}

static unsigned int intel_gtt_total_entries(void)
{
	int size;

	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = KB(128);
			break;
		case I965_PGETBL_SIZE_256KB:
			size = KB(256);
			break;
		case I965_PGETBL_SIZE_512KB:
			size = KB(512);
			break;
		case I965_PGETBL_SIZE_1MB:
			size = KB(1024);
			break;
		case I965_PGETBL_SIZE_2MB:
			size = KB(2048);
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = KB(1024 + 512);
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = KB(512);
		}

		return size/4;
	} else if (INTEL_GTT_GEN == 6) {
		u16 snb_gmch_ctl;

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			size = MB(2);
			break;
		}
		return size/4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
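		 * One PTE maps one aperture page, so the entry count
		 * equals gtt_mappable_entries.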
		 */
		return intel_private.base.gtt_mappable_entries;
	}
}

static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(intel_private.scratch_page);
	__free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}

static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.base.gtt_total_entries = intel_gtt_total_entries();

	dev_info(&intel_private.bridge_dev->dev,
		 "detected gtt size: %dK total, %dK mappable\n",
		 intel_private.base.gtt_total_entries * 4,
		 intel_private.base.gtt_mappable_entries * 4);

	gtt_map_size = intel_private.base.gtt_total_entries * 4;

	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
				    gtt_map_size);
	if (!intel_private.gtt) {
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
	if (intel_private.base.gtt_stolen_entries == 0) {
		iounmap(intel_private.registers);
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	return 0;
}

static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
		    / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}

static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}

static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
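 * (clflush is preferred below; wbinvd on all CPUs is the heavyweight
 * fallback for parts without it.)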
 */
static void i830_chipset_flush(void)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel(addr | pte_flags, intel_private.gtt + entry);
}

static void intel_enable_gtt(void)
{
	u32 gma_addr;
	u16 gmch_ctrl;

	if (INTEL_GTT_GEN == 2)
		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
	else
		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
				      &gma_addr);

	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
}

static int i830_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
	intel_private.pte_bus_addr =
		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;

	intel_i830_setup_flush();

	return 0;
}

static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	int i;

	intel_enable_gtt();

	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	for (i = intel_private.base.gtt_stolen_entries;
	     i < intel_private.base.gtt_total_entries; i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);	/* PCI Posting. */

	global_cache_flush();

	return 0;
}

static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}

static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
					unsigned int sg_len,
					unsigned int pg_start,
					unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(sg_list, sg, sg_len, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr,
							  j, flags);
			j++;
		}
	}
	readl(intel_private.gtt+j-1);
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i, j;
	int ret = -EINVAL;

	if (mem->page_count == 0)
		goto out;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
			   pg_start, intel_private.base.gtt_stolen_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		ret = intel_agp_map_memory(mem);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
	} else {
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			dma_addr_t addr = page_to_phys(mem->pages[i]);
			intel_private.driver->write_entry(addr,
							  j, type);
		}
		readl(intel_private.gtt+j-1);
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
		intel_agp_unmap_memory(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);

	return 0;
}

static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
{
	intel_private.driver->chipset_flush();
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource,
				     PAGE_SIZE, PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a PnP resource, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a PnP resource, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_chipset_flush(void)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}

static void i965_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
}

static bool gen6_check_flags(unsigned int flags)
{
	return true;
}

static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
	u32 pte_flags;

	if (type_mask == AGP_USER_UNCACHED_MEMORY)
		pte_flags = GEN6_PTE_UNCACHED;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
		pte_flags = GEN6_PTE_LLC;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	} else { /* set 'normal'/'cached' to LLC by default */
		pte_flags = GEN6_PTE_LLC_MLC;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	}

	/* gen6 has bit11-4 for physical addr bit39-32 */
	addr |= (addr >> 28) & 0xff0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}

static int i9xx_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	if (INTEL_GTT_GEN == 3) {
		u32 gtt_addr;

		pci_read_config_dword(intel_private.pcidev,
				      I915_PTEADDR, &gtt_addr);
		intel_private.gtt_bus_addr = gtt_addr;
	} else {
		u32 gtt_offset;

		switch (INTEL_GTT_GEN) {
		case 5:
		case 6:
			gtt_offset = MB(2);
			break;
		case 4:
		default:
			gtt_offset = KB(512);
			break;
		}
		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
	}

	intel_private.pte_bus_addr =
		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;

	intel_i9xx_setup_flush();

	return 0;
}

static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};

static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.chipset_flush		= intel_fake_agp_chipset_flush,
};

static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.setup = i830_setup,
	.write_entry = i830_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.setup = i9xx_setup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.write_entry = i965_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
	.gen = 6,
	.setup = i9xx_setup,
	.write_entry = gen6_write_entry,
	.check_flags = gen6_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};

/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
 * gmch_driver and gtt_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
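 * Only the i81x entries pair a real agp driver (intel_810_driver) with a
 * stub gtt driver; every later chipset uses the fake agp shim plus a
 * chipset-specific intel_gtt_driver.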
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct agp_bridge_driver *gmch_driver;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
		"HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
		"HD Graphics",
		&intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
		"Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
		"Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
		"Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
		"Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
		"Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
		"Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
		"Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}

int intel_gmch_probe(struct pci_dev *pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;
	bridge->driver = NULL;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			bridge->driver =
				intel_gtt_chipsets[i].gmch_driver;
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!bridge->driver)
		return 0;

	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	intel_private.bridge_dev = pci_dev_get(pdev);

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	if (intel_private.driver->write_entry == gen6_write_entry)
		mask = 40;
	else if (intel_private.driver->write_entry == i965_write_entry)
		mask = 36;
	else
		mask = 32;

	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	if (bridge->driver == &intel_810_driver)
		return 1;

	if (intel_gtt_init() != 0)
		return 0;

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

struct intel_gtt *intel_gtt_get(void)
{
	return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);

void intel_gmch_remove(struct pci_dev *pdev)
{
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");