intel-gtt.c revision ffdd7510b0bd5ec663b6b11b39810574f2ce3111
/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * a agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <linux/intel-gtt.h>
#include <drm/intel-gtt.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#endif

/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
EXPORT_SYMBOL(intel_max_stolen);

/* i810 fixed aperture modes: {aperture size MB, gtt entries, page order} */
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};

/* Driver-private agp memory types (above the AGP_USER_* range) */
#define AGP_DCACHE_MEMORY	1
#define AGP_PHYS_MEMORY	2
#define INTEL_AGP_CACHED_MEMORY 3

/* PTE flag masks, indexed by the mask types above */
static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};

/* Sandybridge (gen6) cacheability mask types */
#define INTEL_AGP_UNCACHED_MEMORY	      0
#define INTEL_AGP_CACHED_MEMORY_LLC	      1
#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT      2
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC	      3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT  4

static struct gatt_mask intel_gen6_masks[] =
{
	{.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
	 .type = INTEL_AGP_UNCACHED_MEMORY },
	{.mask = I810_PTE_VALID | GEN6_PTE_LLC,
	 .type = INTEL_AGP_CACHED_MEMORY_LLC },
	{.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
	 .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
	 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
	 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
};

/* Singleton driver state; only one GTT per system is handled. */
static struct _intel_private {
	struct intel_gtt base;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	u32 __iomem *gtt;		/* I915G */
	int num_dcache_entries;
	union {
		void __iomem *i9xx_flush_page;	/* 9xx: ioremapped flush page */
		void *i8xx_flush_page;		/* 8xx: kmapped flush page */
	};
	struct page *i8xx_page;
	struct resource ifp_resource;
	int resource_valid;
} intel_private;

#ifdef USE_PCI_DMA_API
static int
intel_agp_map_page(struct page *page, dma_addr_t *ret)
{
	*ret = pci_map_page(intel_private.pcidev, page, 0,
			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
		return -EINVAL;
	return 0;
}

static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
	pci_unmap_page(intel_private.pcidev, dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}

/* Free the sg table built by intel_agp_map_memory(). */
static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	/* rebuild a table header around mem->sg_list so sg_free_table works */
	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}

/* Build a scatterlist over mem->pages and DMA-map it for the GMCH. */
static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		goto err;

	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg))
		goto err;

	return 0;

err:
	sg_free_table(&st);
	return -ENOMEM;
}

static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}

/* Write one GTT PTE per page of a DMA-mapped agp_memory, from pg_start on. */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);

	if (mem->num_sg == mem->page_count) {
		/* one sg entry per page, addresses can be used directly */
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
			       intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
						sg_dma_address(sg) + m * PAGE_SIZE,
						mask_type),
				       intel_private.gtt+j);
				j++;
			}
		}
	}
	readl(intel_private.gtt+j-1);	/* PCI posting: flush the writes */
}

#else

/* Non-IOMMU variant: program physical page addresses straight into the GTT. */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	int i, j;

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.gtt+j);
	}

	readl(intel_private.gtt+j-1);	/* PCI posting: flush the writes */
}

#endif

/* Read the i810 aperture size from SMRAM_MISCC; returns size in MB, 0 if
 * the device is disabled. Also selects agp_bridge->current_size. */
static int intel_i810_fetch_size(void)
{
	u32 smram_miscc;
	struct aper_size_info_fixed *values;

	pci_read_config_dword(intel_private.bridge_dev,
			      I810_SMRAM_MISCC, &smram_miscc);
	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
		return 0;
	}
	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	} else {
		agp_bridge->current_size = (void *) (values);
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}

	return 0;	/* not reached */
}

/* Program the i810: map MMIO, enable the page table, scratch-fill PTEs. */
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;	/* MMIO base is 512k aligned */

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
	    & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* point every PTE at the scratch page so nothing dangles */
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
	}
	global_cache_flush();
	return 0;
}

/* Disable the page table and unmap the i810 MMIO registers. */
static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}

/* agp "enable" is a no-op for the faked GTT bridge. */
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	/* order-2 allocation: 4 physically contiguous pages below 4G */
	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

/* Undo i8xx_alloc_pages(): restore caching, drop refs, free the pages. */
static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}

/* Map an agp user type to an index into the i810/i830 mask table. */
static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
					int type)
{
	if (type < AGP_USER_TYPES)
		return type;
	else if (type == AGP_USER_CACHED_MEMORY)
		return INTEL_AGP_CACHED_MEMORY;
	else
		return 0;
}

/* Map an agp user type to an index into the gen6 (Sandybridge) mask table,
 * honouring the GFDT (graphics data type) flag bit. */
static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
					int type)
{
	unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;

	if (type_mask == AGP_USER_UNCACHED_MEMORY)
		return INTEL_AGP_UNCACHED_MEMORY;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
			      INTEL_AGP_CACHED_MEMORY_LLC_MLC;
	else /* set 'normal'/'cached' to LLC by default */
		return gfdt ?
		       INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
		       INTEL_AGP_CACHED_MEMORY_LLC;
}


/* Bind pages into the i810 GTT (dcache, phys or normal memory types). */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;


	/* refuse to overwrite PTEs that are already in use */
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		/* dcache entries map the on-chip video ram linearly */
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting */
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI posting */
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

/* Unbind a range by pointing its PTEs back at the scratch page. */
static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting */

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

/* i810 allocator: handles the dcache and phys-contiguous special types. */
static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		/* dcache can only be mapped in its entirety */
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		/* dcache memory has no backing system pages */
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}

/* Free memory allocated by intel_i810_alloc_by_type(). */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
agp_bridge->driver->agp_destroy_page(curr->pages[0], 509 AGP_PAGE_DESTROY_UNMAP); 510 agp_bridge->driver->agp_destroy_page(curr->pages[0], 511 AGP_PAGE_DESTROY_FREE); 512 } 513 agp_free_page_array(curr); 514 } 515 kfree(curr); 516} 517 518static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, 519 dma_addr_t addr, int type) 520{ 521 /* Type checking must be done elsewhere */ 522 return addr | bridge->driver->masks[type].mask; 523} 524 525static struct aper_size_info_fixed intel_fake_agp_sizes[] = 526{ 527 {128, 32768, 5}, 528 /* The 64M mode still requires a 128k gatt */ 529 {64, 16384, 5}, 530 {256, 65536, 6}, 531 {512, 131072, 7}, 532}; 533 534static unsigned int intel_gtt_stolen_entries(void) 535{ 536 u16 gmch_ctrl; 537 u8 rdct; 538 int local = 0; 539 static const int ddt[4] = { 0, 16, 32, 64 }; 540 unsigned int overhead_entries, stolen_entries; 541 unsigned int stolen_size = 0; 542 543 pci_read_config_word(intel_private.bridge_dev, 544 I830_GMCH_CTRL, &gmch_ctrl); 545 546 if (IS_G4X || IS_PINEVIEW) 547 overhead_entries = 0; 548 else 549 overhead_entries = intel_private.base.gtt_mappable_entries 550 / 1024; 551 552 overhead_entries += 1; /* BIOS popup */ 553 554 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || 555 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { 556 switch (gmch_ctrl & I830_GMCH_GMS_MASK) { 557 case I830_GMCH_GMS_STOLEN_512: 558 stolen_size = KB(512); 559 break; 560 case I830_GMCH_GMS_STOLEN_1024: 561 stolen_size = MB(1); 562 break; 563 case I830_GMCH_GMS_STOLEN_8192: 564 stolen_size = MB(8); 565 break; 566 case I830_GMCH_GMS_LOCAL: 567 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); 568 stolen_size = (I830_RDRAM_ND(rdct) + 1) * 569 MB(ddt[I830_RDRAM_DDT(rdct)]); 570 local = 1; 571 break; 572 default: 573 stolen_size = 0; 574 break; 575 } 576 } else if (IS_SNB) { 577 /* 578 * SandyBridge has new memory control reg at 0x50.w 579 */ 580 u16 snb_gmch_ctl; 581 
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); 582 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { 583 case SNB_GMCH_GMS_STOLEN_32M: 584 stolen_size = MB(32); 585 break; 586 case SNB_GMCH_GMS_STOLEN_64M: 587 stolen_size = MB(64); 588 break; 589 case SNB_GMCH_GMS_STOLEN_96M: 590 stolen_size = MB(96); 591 break; 592 case SNB_GMCH_GMS_STOLEN_128M: 593 stolen_size = MB(128); 594 break; 595 case SNB_GMCH_GMS_STOLEN_160M: 596 stolen_size = MB(160); 597 break; 598 case SNB_GMCH_GMS_STOLEN_192M: 599 stolen_size = MB(192); 600 break; 601 case SNB_GMCH_GMS_STOLEN_224M: 602 stolen_size = MB(224); 603 break; 604 case SNB_GMCH_GMS_STOLEN_256M: 605 stolen_size = MB(256); 606 break; 607 case SNB_GMCH_GMS_STOLEN_288M: 608 stolen_size = MB(288); 609 break; 610 case SNB_GMCH_GMS_STOLEN_320M: 611 stolen_size = MB(320); 612 break; 613 case SNB_GMCH_GMS_STOLEN_352M: 614 stolen_size = MB(352); 615 break; 616 case SNB_GMCH_GMS_STOLEN_384M: 617 stolen_size = MB(384); 618 break; 619 case SNB_GMCH_GMS_STOLEN_416M: 620 stolen_size = MB(416); 621 break; 622 case SNB_GMCH_GMS_STOLEN_448M: 623 stolen_size = MB(448); 624 break; 625 case SNB_GMCH_GMS_STOLEN_480M: 626 stolen_size = MB(480); 627 break; 628 case SNB_GMCH_GMS_STOLEN_512M: 629 stolen_size = MB(512); 630 break; 631 } 632 } else { 633 switch (gmch_ctrl & I855_GMCH_GMS_MASK) { 634 case I855_GMCH_GMS_STOLEN_1M: 635 stolen_size = MB(1); 636 break; 637 case I855_GMCH_GMS_STOLEN_4M: 638 stolen_size = MB(4); 639 break; 640 case I855_GMCH_GMS_STOLEN_8M: 641 stolen_size = MB(8); 642 break; 643 case I855_GMCH_GMS_STOLEN_16M: 644 stolen_size = MB(16); 645 break; 646 case I855_GMCH_GMS_STOLEN_32M: 647 stolen_size = MB(32); 648 break; 649 case I915_GMCH_GMS_STOLEN_48M: 650 stolen_size = MB(48); 651 break; 652 case I915_GMCH_GMS_STOLEN_64M: 653 stolen_size = MB(64); 654 break; 655 case G33_GMCH_GMS_STOLEN_128M: 656 stolen_size = MB(128); 657 break; 658 case G33_GMCH_GMS_STOLEN_256M: 659 stolen_size = MB(256); 660 break; 
661 case INTEL_GMCH_GMS_STOLEN_96M: 662 stolen_size = MB(96); 663 break; 664 case INTEL_GMCH_GMS_STOLEN_160M: 665 stolen_size = MB(160); 666 break; 667 case INTEL_GMCH_GMS_STOLEN_224M: 668 stolen_size = MB(224); 669 break; 670 case INTEL_GMCH_GMS_STOLEN_352M: 671 stolen_size = MB(352); 672 break; 673 default: 674 stolen_size = 0; 675 break; 676 } 677 } 678 679 if (!local && stolen_size > intel_max_stolen) { 680 dev_info(&intel_private.bridge_dev->dev, 681 "detected %dK stolen memory, trimming to %dK\n", 682 stolen_size / KB(1), intel_max_stolen / KB(1)); 683 stolen_size = intel_max_stolen; 684 } else if (stolen_size > 0) { 685 dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", 686 stolen_size / KB(1), local ? "local" : "stolen"); 687 } else { 688 dev_info(&intel_private.bridge_dev->dev, 689 "no pre-allocated video memory detected\n"); 690 stolen_size = 0; 691 } 692 693 stolen_entries = stolen_size/KB(4) - overhead_entries; 694 695 return stolen_entries; 696} 697 698#if 0 /* extracted code in bad shape, needs some cleaning before use */ 699static unsigned int intel_gtt_total_entries(void) 700{ 701 int size; 702 u16 gmch_ctrl; 703 704 if (IS_I965) { 705 u32 pgetbl_ctl; 706 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); 707 708 /* The 965 has a field telling us the size of the GTT, 709 * which may be larger than what is necessary to map the 710 * aperture. 
		 */
		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = 128;
			break;
		case I965_PGETBL_SIZE_256KB:
			size = 256;
			break;
		case I965_PGETBL_SIZE_512KB:
			size = 512;
			break;
		case I965_PGETBL_SIZE_1MB:
			size = 1024;
			break;
		case I965_PGETBL_SIZE_2MB:
			size = 2048;
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = 1024 + 512;
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = 512;
		}
		size += 4; /* add in BIOS popup space */
	} else if (IS_G33 && !IS_PINEVIEW) {
		/* G33's GTT size defined in gmch_ctrl */
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&intel_private.bridge_dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
		size += 4;
	} else if (IS_G4X || IS_PINEVIEW) {
		/* On 4 series hardware, GTT stolen is separate from graphics
		 * stolen, ignore it in stolen gtt entries counting. However,
		 * 4KB of the stolen memory doesn't get mapped to the GTT.
		 */
		size = 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size() + 4;
	}

	return size/KB(4);
}
#endif

/* Number of GTT entries reachable through the CPU-visible aperture. */
static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;
	u16 gmch_ctrl;

	aperture_size = 1024 * 1024;	/* 1 MB unit for the i8xx cases below */

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	switch (intel_private.pcidev->device) {
	case PCI_DEVICE_ID_INTEL_82830_CGC:
	case PCI_DEVICE_ID_INTEL_82845G_IG:
	case PCI_DEVICE_ID_INTEL_82855GM_IG:
	case PCI_DEVICE_ID_INTEL_82865_IG:
		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size *= 64;
		else
			aperture_size *= 128;
		break;
	default:
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
		break;
	}

	return aperture_size >> PAGE_SHIFT;
}

/* Common GTT setup; fails (and unmaps the MMIO range) if no stolen memory. */
static int intel_gtt_init(void)
{
	/* we have to call this as early as possible after the MMIO base address is known */
	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
	if (intel_private.base.gtt_stolen_entries == 0) {
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	return 0;
}

/* Match the detected aperture size against the fake agp size table. */
static int intel_fake_agp_fetch_size(void)
{
	unsigned int aper_size;
	int i;
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);

	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
		/ MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size = intel_fake_agp_sizes + i;
			return aper_size;
		}
	}

	return 0;
}

/* Tear down the i8xx chipset-flush page set up by intel_i830_setup_flush(). */
static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}

/* Allocate and kmap the page used by intel_i830_chipset_flush(). */
static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}

/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for in the GTT.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order, ret;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
	temp &= 0xfff80000;	/* MMIO base is 512k aligned */

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	/* the BIOS has already programmed the gatt base; just read it back */
	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	ret = intel_gtt_init();
	if (ret != 0)
		return ret;

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}

/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

/* Program the i830: enable the GMCH, the page table and scratch PTEs. */
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* only scratch-fill entries above the stolen region */
		for (i = intel_private.base.gtt_stolen_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}

static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}

/* Bind pages into the i830 GTT; refuses to touch the stolen region. */
static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
			   pg_start, intel_private.base.gtt_stolen_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI posting */

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

/* Unbind a range by pointing its PTEs back at the scratch page. */
static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting */

	return 0;
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}

/* Allocate a page-sized MMIO resource for the isoch flush page (IFP). */
static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}

/* Locate (or allocate and program) the 915-class flush page via IFPADDR. */
static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		/* not yet enabled by the BIOS: allocate and enable it */
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;	/* mask off the enable bit to get the address */

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

/* Same as above for 965/G33-class hardware, which has a 64-bit IFPADDR. */
static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;	/* mask off the enable bit */
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

/* Set up the 9xx chipset flush page and ioremap it. */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	/* Sandybridge needs no explicit flush page */
	if (IS_SNB)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_I965 || IS_G33 || IS_G4X) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

/* Program a 9xx-class chipset: enable GMCH/page table, scratch PTEs, flush. */
static int intel_i9xx_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);

	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* only scratch-fill entries above the stolen region */
		for (i = intel_private.base.gtt_stolen_entries; i <
				intel_private.base.gtt_total_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.gtt+i);
		}
		readl(intel_private.gtt+i-1);	/* PCI Posting.
*/ 1164 } 1165 1166 global_cache_flush(); 1167 1168 intel_i9xx_setup_flush(); 1169 1170 return 0; 1171} 1172 1173static void intel_i915_cleanup(void) 1174{ 1175 if (intel_private.i9xx_flush_page) 1176 iounmap(intel_private.i9xx_flush_page); 1177 if (intel_private.resource_valid) 1178 release_resource(&intel_private.ifp_resource); 1179 intel_private.ifp_resource.start = 0; 1180 intel_private.resource_valid = 0; 1181 iounmap(intel_private.gtt); 1182 iounmap(intel_private.registers); 1183} 1184 1185static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) 1186{ 1187 if (intel_private.i9xx_flush_page) 1188 writel(1, intel_private.i9xx_flush_page); 1189} 1190 1191static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, 1192 int type) 1193{ 1194 int num_entries; 1195 void *temp; 1196 int ret = -EINVAL; 1197 int mask_type; 1198 1199 if (mem->page_count == 0) 1200 goto out; 1201 1202 temp = agp_bridge->current_size; 1203 num_entries = A_SIZE_FIX(temp)->num_entries; 1204 1205 if (pg_start < intel_private.base.gtt_stolen_entries) { 1206 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, 1207 "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n", 1208 pg_start, intel_private.base.gtt_stolen_entries); 1209 1210 dev_info(&intel_private.pcidev->dev, 1211 "trying to insert into local/stolen memory\n"); 1212 goto out_err; 1213 } 1214 1215 if ((pg_start + mem->page_count) > num_entries) 1216 goto out_err; 1217 1218 /* The i915 can't check the GTT for entries since it's read only; 1219 * depend on the caller to make the correct offset decisions. 
1220 */ 1221 1222 if (type != mem->type) 1223 goto out_err; 1224 1225 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); 1226 1227 if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY && 1228 mask_type != INTEL_AGP_CACHED_MEMORY) 1229 goto out_err; 1230 1231 if (!mem->is_flushed) 1232 global_cache_flush(); 1233 1234 intel_agp_insert_sg_entries(mem, pg_start, mask_type); 1235 1236 out: 1237 ret = 0; 1238 out_err: 1239 mem->is_flushed = true; 1240 return ret; 1241} 1242 1243static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, 1244 int type) 1245{ 1246 int i; 1247 1248 if (mem->page_count == 0) 1249 return 0; 1250 1251 if (pg_start < intel_private.base.gtt_stolen_entries) { 1252 dev_info(&intel_private.pcidev->dev, 1253 "trying to disable local/stolen memory\n"); 1254 return -EINVAL; 1255 } 1256 1257 for (i = pg_start; i < (mem->page_count + pg_start); i++) 1258 writel(agp_bridge->scratch_page, intel_private.gtt+i); 1259 1260 readl(intel_private.gtt+i-1); 1261 1262 return 0; 1263} 1264 1265/* Return the aperture size by just checking the resource length. The effect 1266 * described in the spec of the MSAC registers is just changing of the 1267 * resource size. 
1268 */ 1269static int intel_i915_get_gtt_size(void) 1270{ 1271 int size; 1272 1273 if (IS_G33) { 1274 u16 gmch_ctrl; 1275 1276 /* G33's GTT size defined in gmch_ctrl */ 1277 pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); 1278 switch (gmch_ctrl & I830_GMCH_GMS_MASK) { 1279 case I830_GMCH_GMS_STOLEN_512: 1280 size = 512; 1281 break; 1282 case I830_GMCH_GMS_STOLEN_1024: 1283 size = 1024; 1284 break; 1285 case I830_GMCH_GMS_STOLEN_8192: 1286 size = 8*1024; 1287 break; 1288 default: 1289 dev_info(&intel_private.bridge_dev->dev, 1290 "unknown page table size 0x%x, assuming 512KB\n", 1291 (gmch_ctrl & I830_GMCH_GMS_MASK)); 1292 size = 512; 1293 } 1294 } else { 1295 /* On previous hardware, the GTT size was just what was 1296 * required to map the aperture. 1297 */ 1298 size = agp_bridge->driver->fetch_size(); 1299 } 1300 1301 return KB(size); 1302} 1303 1304/* The intel i915 automatically initializes the agp aperture during POST. 1305 * Use the memory already set aside for in the GTT. 
1306 */ 1307static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) 1308{ 1309 int page_order, ret; 1310 struct aper_size_info_fixed *size; 1311 int num_entries; 1312 u32 temp, temp2; 1313 int gtt_map_size; 1314 1315 size = agp_bridge->current_size; 1316 page_order = size->page_order; 1317 num_entries = size->num_entries; 1318 agp_bridge->gatt_table_real = NULL; 1319 1320 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); 1321 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); 1322 1323 gtt_map_size = intel_i915_get_gtt_size(); 1324 1325 intel_private.gtt = ioremap(temp2, gtt_map_size); 1326 if (!intel_private.gtt) 1327 return -ENOMEM; 1328 1329 intel_private.base.gtt_total_entries = gtt_map_size / 4; 1330 1331 temp &= 0xfff80000; 1332 1333 intel_private.registers = ioremap(temp, 128 * 4096); 1334 if (!intel_private.registers) { 1335 iounmap(intel_private.gtt); 1336 return -ENOMEM; 1337 } 1338 1339 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; 1340 global_cache_flush(); /* FIXME: ? */ 1341 1342 ret = intel_gtt_init(); 1343 if (ret != 0) { 1344 iounmap(intel_private.gtt); 1345 return ret; 1346 } 1347 1348 agp_bridge->gatt_table = NULL; 1349 1350 agp_bridge->gatt_bus_addr = temp; 1351 1352 return 0; 1353} 1354 1355/* 1356 * The i965 supports 36-bit physical addresses, but to keep 1357 * the format of the GTT the same, the bits that don't fit 1358 * in a 32-bit word are shifted down to bits 4..7. 1359 * 1360 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" 1361 * is always zero on 32-bit architectures, so no need to make 1362 * this conditional. 
 */
/* Fold a >32-bit physical address into the i965 PTE layout (high bits in
 * PTE bits 4..7) and OR in the caching/validity mask for 'type'. */
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

/* Gen6 (Sandybridge) PTE packing: high physical-address bits occupy PTE
 * bits 4..11 instead of 4..7. */
static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* gen6 has bit11-4 for physical addr bit39-32 */
	addr |= (addr >> 28) & 0xff0;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

/* Report, per host-bridge device ID, where the GTT sits inside the MMIO
 * BAR (*gtt_offset) and how large it is (*gtt_size). Sandybridge reads the
 * size out of SNB_GMCH_CTRL; G4x/Ironlake use 2MB/2MB; everything else
 * falls back to 512KB/512KB. */
static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
	u16 snb_gmch_ctl;

	switch (intel_private.bridge_dev->device) {
	case PCI_DEVICE_ID_INTEL_GM45_HB:
	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
	case PCI_DEVICE_ID_INTEL_Q45_HB:
	case PCI_DEVICE_ID_INTEL_G45_HB:
	case PCI_DEVICE_ID_INTEL_G41_HB:
	case PCI_DEVICE_ID_INTEL_B43_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
		*gtt_offset = *gtt_size = MB(2);
		break;
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
		*gtt_offset = MB(2);

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			/* Hardware reported no GTT — broken configuration. */
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			*gtt_size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			*gtt_size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			*gtt_size = MB(2);
			break;
		}
		break;
	default:
		*gtt_offset = *gtt_size = KB(512);
	}
}

/* The intel i965 automatically initializes the agp aperture during POST.
1427 * Use the memory already set aside for in the GTT. 1428 */ 1429static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) 1430{ 1431 int page_order, ret; 1432 struct aper_size_info_fixed *size; 1433 int num_entries; 1434 u32 temp; 1435 int gtt_offset, gtt_size; 1436 1437 size = agp_bridge->current_size; 1438 page_order = size->page_order; 1439 num_entries = size->num_entries; 1440 agp_bridge->gatt_table_real = NULL; 1441 1442 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); 1443 1444 temp &= 0xfff00000; 1445 1446 intel_i965_get_gtt_range(>t_offset, >t_size); 1447 1448 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); 1449 1450 if (!intel_private.gtt) 1451 return -ENOMEM; 1452 1453 intel_private.base.gtt_total_entries = gtt_size / 4; 1454 1455 intel_private.registers = ioremap(temp, 128 * 4096); 1456 if (!intel_private.registers) { 1457 iounmap(intel_private.gtt); 1458 return -ENOMEM; 1459 } 1460 1461 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; 1462 global_cache_flush(); /* FIXME: ? 
*/ 1463 1464 ret = intel_gtt_init(); 1465 if (ret != 0) { 1466 iounmap(intel_private.gtt); 1467 return ret; 1468 } 1469 1470 agp_bridge->gatt_table = NULL; 1471 1472 agp_bridge->gatt_bus_addr = temp; 1473 1474 return 0; 1475} 1476 1477static const struct agp_bridge_driver intel_810_driver = { 1478 .owner = THIS_MODULE, 1479 .aperture_sizes = intel_i810_sizes, 1480 .size_type = FIXED_APER_SIZE, 1481 .num_aperture_sizes = 2, 1482 .needs_scratch_page = true, 1483 .configure = intel_i810_configure, 1484 .fetch_size = intel_i810_fetch_size, 1485 .cleanup = intel_i810_cleanup, 1486 .mask_memory = intel_i810_mask_memory, 1487 .masks = intel_i810_masks, 1488 .agp_enable = intel_fake_agp_enable, 1489 .cache_flush = global_cache_flush, 1490 .create_gatt_table = agp_generic_create_gatt_table, 1491 .free_gatt_table = agp_generic_free_gatt_table, 1492 .insert_memory = intel_i810_insert_entries, 1493 .remove_memory = intel_i810_remove_entries, 1494 .alloc_by_type = intel_i810_alloc_by_type, 1495 .free_by_type = intel_i810_free_by_type, 1496 .agp_alloc_page = agp_generic_alloc_page, 1497 .agp_alloc_pages = agp_generic_alloc_pages, 1498 .agp_destroy_page = agp_generic_destroy_page, 1499 .agp_destroy_pages = agp_generic_destroy_pages, 1500 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1501}; 1502 1503static const struct agp_bridge_driver intel_830_driver = { 1504 .owner = THIS_MODULE, 1505 .aperture_sizes = intel_fake_agp_sizes, 1506 .size_type = FIXED_APER_SIZE, 1507 .num_aperture_sizes = 4, 1508 .needs_scratch_page = true, 1509 .configure = intel_i830_configure, 1510 .fetch_size = intel_fake_agp_fetch_size, 1511 .cleanup = intel_i830_cleanup, 1512 .mask_memory = intel_i810_mask_memory, 1513 .masks = intel_i810_masks, 1514 .agp_enable = intel_fake_agp_enable, 1515 .cache_flush = global_cache_flush, 1516 .create_gatt_table = intel_i830_create_gatt_table, 1517 .free_gatt_table = intel_fake_agp_free_gatt_table, 1518 .insert_memory = intel_i830_insert_entries, 1519 
.remove_memory = intel_i830_remove_entries, 1520 .alloc_by_type = intel_fake_agp_alloc_by_type, 1521 .free_by_type = intel_i810_free_by_type, 1522 .agp_alloc_page = agp_generic_alloc_page, 1523 .agp_alloc_pages = agp_generic_alloc_pages, 1524 .agp_destroy_page = agp_generic_destroy_page, 1525 .agp_destroy_pages = agp_generic_destroy_pages, 1526 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1527 .chipset_flush = intel_i830_chipset_flush, 1528}; 1529 1530static const struct agp_bridge_driver intel_915_driver = { 1531 .owner = THIS_MODULE, 1532 .aperture_sizes = intel_fake_agp_sizes, 1533 .size_type = FIXED_APER_SIZE, 1534 .num_aperture_sizes = 4, 1535 .needs_scratch_page = true, 1536 .configure = intel_i9xx_configure, 1537 .fetch_size = intel_fake_agp_fetch_size, 1538 .cleanup = intel_i915_cleanup, 1539 .mask_memory = intel_i810_mask_memory, 1540 .masks = intel_i810_masks, 1541 .agp_enable = intel_fake_agp_enable, 1542 .cache_flush = global_cache_flush, 1543 .create_gatt_table = intel_i915_create_gatt_table, 1544 .free_gatt_table = intel_fake_agp_free_gatt_table, 1545 .insert_memory = intel_i915_insert_entries, 1546 .remove_memory = intel_i915_remove_entries, 1547 .alloc_by_type = intel_fake_agp_alloc_by_type, 1548 .free_by_type = intel_i810_free_by_type, 1549 .agp_alloc_page = agp_generic_alloc_page, 1550 .agp_alloc_pages = agp_generic_alloc_pages, 1551 .agp_destroy_page = agp_generic_destroy_page, 1552 .agp_destroy_pages = agp_generic_destroy_pages, 1553 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1554 .chipset_flush = intel_i915_chipset_flush, 1555#ifdef USE_PCI_DMA_API 1556 .agp_map_page = intel_agp_map_page, 1557 .agp_unmap_page = intel_agp_unmap_page, 1558 .agp_map_memory = intel_agp_map_memory, 1559 .agp_unmap_memory = intel_agp_unmap_memory, 1560#endif 1561}; 1562 1563static const struct agp_bridge_driver intel_i965_driver = { 1564 .owner = THIS_MODULE, 1565 .aperture_sizes = intel_fake_agp_sizes, 1566 .size_type = FIXED_APER_SIZE, 1567 
.num_aperture_sizes = 4, 1568 .needs_scratch_page = true, 1569 .configure = intel_i9xx_configure, 1570 .fetch_size = intel_fake_agp_fetch_size, 1571 .cleanup = intel_i915_cleanup, 1572 .mask_memory = intel_i965_mask_memory, 1573 .masks = intel_i810_masks, 1574 .agp_enable = intel_fake_agp_enable, 1575 .cache_flush = global_cache_flush, 1576 .create_gatt_table = intel_i965_create_gatt_table, 1577 .free_gatt_table = intel_fake_agp_free_gatt_table, 1578 .insert_memory = intel_i915_insert_entries, 1579 .remove_memory = intel_i915_remove_entries, 1580 .alloc_by_type = intel_fake_agp_alloc_by_type, 1581 .free_by_type = intel_i810_free_by_type, 1582 .agp_alloc_page = agp_generic_alloc_page, 1583 .agp_alloc_pages = agp_generic_alloc_pages, 1584 .agp_destroy_page = agp_generic_destroy_page, 1585 .agp_destroy_pages = agp_generic_destroy_pages, 1586 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1587 .chipset_flush = intel_i915_chipset_flush, 1588#ifdef USE_PCI_DMA_API 1589 .agp_map_page = intel_agp_map_page, 1590 .agp_unmap_page = intel_agp_unmap_page, 1591 .agp_map_memory = intel_agp_map_memory, 1592 .agp_unmap_memory = intel_agp_unmap_memory, 1593#endif 1594}; 1595 1596static const struct agp_bridge_driver intel_gen6_driver = { 1597 .owner = THIS_MODULE, 1598 .aperture_sizes = intel_fake_agp_sizes, 1599 .size_type = FIXED_APER_SIZE, 1600 .num_aperture_sizes = 4, 1601 .needs_scratch_page = true, 1602 .configure = intel_i9xx_configure, 1603 .fetch_size = intel_fake_agp_fetch_size, 1604 .cleanup = intel_i915_cleanup, 1605 .mask_memory = intel_gen6_mask_memory, 1606 .masks = intel_gen6_masks, 1607 .agp_enable = intel_fake_agp_enable, 1608 .cache_flush = global_cache_flush, 1609 .create_gatt_table = intel_i965_create_gatt_table, 1610 .free_gatt_table = intel_fake_agp_free_gatt_table, 1611 .insert_memory = intel_i915_insert_entries, 1612 .remove_memory = intel_i915_remove_entries, 1613 .alloc_by_type = intel_fake_agp_alloc_by_type, 1614 .free_by_type = 
intel_i810_free_by_type, 1615 .agp_alloc_page = agp_generic_alloc_page, 1616 .agp_alloc_pages = agp_generic_alloc_pages, 1617 .agp_destroy_page = agp_generic_destroy_page, 1618 .agp_destroy_pages = agp_generic_destroy_pages, 1619 .agp_type_to_mask_type = intel_gen6_type_to_mask_type, 1620 .chipset_flush = intel_i915_chipset_flush, 1621#ifdef USE_PCI_DMA_API 1622 .agp_map_page = intel_agp_map_page, 1623 .agp_unmap_page = intel_agp_unmap_page, 1624 .agp_map_memory = intel_agp_map_memory, 1625 .agp_unmap_memory = intel_agp_unmap_memory, 1626#endif 1627}; 1628 1629static const struct agp_bridge_driver intel_g33_driver = { 1630 .owner = THIS_MODULE, 1631 .aperture_sizes = intel_fake_agp_sizes, 1632 .size_type = FIXED_APER_SIZE, 1633 .num_aperture_sizes = 4, 1634 .needs_scratch_page = true, 1635 .configure = intel_i9xx_configure, 1636 .fetch_size = intel_fake_agp_fetch_size, 1637 .cleanup = intel_i915_cleanup, 1638 .mask_memory = intel_i965_mask_memory, 1639 .masks = intel_i810_masks, 1640 .agp_enable = intel_fake_agp_enable, 1641 .cache_flush = global_cache_flush, 1642 .create_gatt_table = intel_i915_create_gatt_table, 1643 .free_gatt_table = intel_fake_agp_free_gatt_table, 1644 .insert_memory = intel_i915_insert_entries, 1645 .remove_memory = intel_i915_remove_entries, 1646 .alloc_by_type = intel_fake_agp_alloc_by_type, 1647 .free_by_type = intel_i810_free_by_type, 1648 .agp_alloc_page = agp_generic_alloc_page, 1649 .agp_alloc_pages = agp_generic_alloc_pages, 1650 .agp_destroy_page = agp_generic_destroy_page, 1651 .agp_destroy_pages = agp_generic_destroy_pages, 1652 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1653 .chipset_flush = intel_i915_chipset_flush, 1654#ifdef USE_PCI_DMA_API 1655 .agp_map_page = intel_agp_map_page, 1656 .agp_unmap_page = intel_agp_unmap_page, 1657 .agp_map_memory = intel_agp_map_memory, 1658 .agp_unmap_memory = intel_agp_unmap_memory, 1659#endif 1660}; 1661 1662/* Table to describe Intel GMCH and AGP/PCIE GART drivers. 
At least one of 1663 * driver and gmch_driver must be non-null, and find_gmch will determine 1664 * which one should be used if a gmch_chip_id is present. 1665 */ 1666static const struct intel_gtt_driver_description { 1667 unsigned int gmch_chip_id; 1668 char *name; 1669 const struct agp_bridge_driver *gmch_driver; 1670} intel_gtt_chipsets[] = { 1671 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver }, 1672 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver }, 1673 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver }, 1674 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver }, 1675 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &intel_830_driver }, 1676 { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", &intel_830_driver }, 1677 { PCI_DEVICE_ID_INTEL_82854_IG, "854", &intel_830_driver }, 1678 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", &intel_830_driver }, 1679 { PCI_DEVICE_ID_INTEL_82865_IG, "865", &intel_830_driver }, 1680 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", &intel_915_driver }, 1681 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", &intel_915_driver }, 1682 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", &intel_915_driver }, 1683 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", &intel_915_driver }, 1684 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", &intel_915_driver }, 1685 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", &intel_915_driver }, 1686 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", &intel_i965_driver }, 1687 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", &intel_i965_driver }, 1688 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", &intel_i965_driver }, 1689 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", &intel_i965_driver }, 1690 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", &intel_i965_driver }, 1691 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", &intel_i965_driver }, 1692 { PCI_DEVICE_ID_INTEL_G33_IG, "G33", &intel_g33_driver }, 1693 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", &intel_g33_driver }, 1694 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", &intel_g33_driver }, 1695 { 
PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", &intel_g33_driver }, 1696 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", &intel_g33_driver }, 1697 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", &intel_i965_driver }, 1698 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", &intel_i965_driver }, 1699 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", &intel_i965_driver }, 1700 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", &intel_i965_driver }, 1701 { PCI_DEVICE_ID_INTEL_B43_IG, "B43", &intel_i965_driver }, 1702 { PCI_DEVICE_ID_INTEL_G41_IG, "G41", &intel_i965_driver }, 1703 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 1704 "HD Graphics", &intel_i965_driver }, 1705 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 1706 "HD Graphics", &intel_i965_driver }, 1707 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, 1708 "Sandybridge", &intel_gen6_driver }, 1709 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, 1710 "Sandybridge", &intel_gen6_driver }, 1711 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, 1712 "Sandybridge", &intel_gen6_driver }, 1713 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, 1714 "Sandybridge", &intel_gen6_driver }, 1715 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, 1716 "Sandybridge", &intel_gen6_driver }, 1717 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, 1718 "Sandybridge", &intel_gen6_driver }, 1719 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, 1720 "Sandybridge", &intel_gen6_driver }, 1721 { 0, NULL, NULL } 1722}; 1723 1724static int find_gmch(u16 device) 1725{ 1726 struct pci_dev *gmch_device; 1727 1728 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); 1729 if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { 1730 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, 1731 device, gmch_device); 1732 } 1733 1734 if (!gmch_device) 1735 return 0; 1736 1737 intel_private.pcidev = gmch_device; 1738 return 1; 1739} 1740 1741int intel_gmch_probe(struct pci_dev *pdev, 1742 struct agp_bridge_data *bridge) 1743{ 1744 int i, mask; 1745 bridge->driver = NULL; 1746 1747 for (i = 0; intel_gtt_chipsets[i].name != NULL; 
i++) { 1748 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { 1749 bridge->driver = 1750 intel_gtt_chipsets[i].gmch_driver; 1751 break; 1752 } 1753 } 1754 1755 if (!bridge->driver) 1756 return 0; 1757 1758 bridge->dev_private_data = &intel_private; 1759 bridge->dev = pdev; 1760 1761 intel_private.bridge_dev = pci_dev_get(pdev); 1762 1763 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); 1764 1765 if (bridge->driver->mask_memory == intel_gen6_mask_memory) 1766 mask = 40; 1767 else if (bridge->driver->mask_memory == intel_i965_mask_memory) 1768 mask = 36; 1769 else 1770 mask = 32; 1771 1772 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) 1773 dev_err(&intel_private.pcidev->dev, 1774 "set gfx device dma mask %d-bit failed!\n", mask); 1775 else 1776 pci_set_consistent_dma_mask(intel_private.pcidev, 1777 DMA_BIT_MASK(mask)); 1778 1779 if (bridge->driver == &intel_810_driver) 1780 return 1; 1781 1782 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); 1783 1784 return 1; 1785} 1786EXPORT_SYMBOL(intel_gmch_probe); 1787 1788void intel_gmch_remove(struct pci_dev *pdev) 1789{ 1790 if (intel_private.pcidev) 1791 pci_dev_put(intel_private.pcidev); 1792 if (intel_private.bridge_dev) 1793 pci_dev_put(intel_private.bridge_dev); 1794} 1795EXPORT_SYMBOL(intel_gmch_remove); 1796 1797MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 1798MODULE_LICENSE("GPL and additional rights"); 1799