i915_dma.c revision e20f9c64c79e2282f9eb531509181965ec8f0a92
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	dev_priv->render_ring.status_page.page_addr
		= dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);

	if (IS_I965G(dev))
		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
					     0xf0;

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}
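
/*
 * Illustrative sketch, not part of the driver: the value written to
 * HWS_PGA above is just the page-aligned bus address of the status page;
 * on 965 the code additionally folds address bits [31:28] down into
 * register bits [7:4], which is what the shift-and-mask above computes.
 * The helper below is hypothetical and only restates that encoding:
 *
 *	static u32 hws_pga_value(u32 busaddr, bool is_i965)
 *	{
 *		u32 val = busaddr & PAGE_MASK;
 *
 *		if (is_i965)
 *			val |= (busaddr >> 28) & 0xf0;
 *		return val;
 *	}
 */
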
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->render_ring.status_page.gfx_addr) {
		dev_priv->render_ring.status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = &dev_priv->render_ring;

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
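
/*
 * Illustrative sketch, not part of the driver: the free-space computation
 * above treats the ring as a circular buffer. head is where the GPU reads,
 * tail is where the CPU writes; the "+ 8" keeps tail from ever catching up
 * to head exactly, which would be indistinguishable from an empty ring.
 * A standalone restatement, with hypothetical names:
 *
 *	static int ring_free_space(int head, int tail, int size)
 *	{
 *		int space = head - (tail + 8);
 *
 *		if (space < 0)
 *			space += size;
 *		return space;
 *	}
 */
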
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (dev_priv->render_ring.gem_object != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->render_ring.size = init->ring_size;

		dev_priv->render_ring.map.offset = init->ring_start;
		dev_priv->render_ring.map.size = init->ring_size;
		dev_priv->render_ring.map.type = 0;
		dev_priv->render_ring.map.flags = 0;
		dev_priv->render_ring.map.mtrr = 0;

		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);

		if (dev_priv->render_ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	struct intel_ring_buffer *ring;
	DRM_DEBUG_DRIVER("%s\n", __func__);

	ring = &dev_priv->render_ring;

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		ring->setup_status_page(dev, ring);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
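
/*
 * Illustrative sketch, not part of the driver: the validator below decodes
 * the instruction "client" from bits [31:29] and, for variable-length
 * commands, a DWord-count field in the low bits. For example, a 2D
 * (client 0x2) command of total length N DWords encodes N - 2 in bits
 * [7:0], so the scanner advances by (cmd & 0xff) + 2. A hypothetical walk
 * over a buffer using the validator might look like:
 *
 *	int i = 0;
 *	while (i < dwords) {
 *		int sz = validate_cmd(buffer[i]);
 *
 *		if (sz == 0 || i + sz > dwords)
 *			return -EINVAL;
 *		i += sz;
 *	}
 */
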
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		cmd = buffer[i];

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			OUT_RING(buffer[i]);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *boxes,
	      int i, int DR1, int DR4)
{
	struct drm_clip_rect box = boxes[i];

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
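
/*
 * Illustrative sketch, not part of the driver: ring emission throughout
 * this file follows the same three-step macro pattern used above --
 * reserve space, write DWords, then move the tail pointer so the GPU
 * sees the new commands:
 *
 *	BEGIN_LP_RING(2);	<- reserve two DWords
 *	OUT_RING(MI_FLUSH);	<- write a command DWord
 *	OUT_RING(MI_NOOP);	<- pad to the reserved size
 *	ADVANCE_LP_RING();	<- publish by bumping the tail
 *
 * The count passed to BEGIN_LP_RING() must match the number of
 * OUT_RING() calls; odd-length emissions get a trailing zero DWord, as
 * in i915_emit_cmds() above, to keep the tail quadword aligned.
 */
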
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	int nbox = batch->num_cliprects;
	int i = 0, count;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
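
/*
 * Illustrative sketch, not part of the driver: the breadcrumb emitted
 * above is an MI_STORE_DWORD_INDEX that makes the GPU write the current
 * counter into a fixed slot of the hardware status page. The CPU side
 * later reads that slot back (via the READ_BREADCRUMB() macro used in
 * the ioctl handlers below) to learn how far the GPU has progressed,
 * roughly:
 *
 *	u32 completed = READ_BREADCRUMB(dev_priv);
 *
 *	if (completed == dev_priv->counter)
 *		... everything queued so far has executed ...
 */
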
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
				      dev_priv->render_ring.size - 8);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			/* copy_from_user returns bytes not copied, not an
			 * errno; translate before returning to userspace.
			 */
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}
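
/*
 * Illustrative sketch, not part of the driver: a legacy (DRI1) client
 * reaches i915_batchbuffer() above through the corresponding ioctl,
 * filling in drm_i915_batchbuffer_t from i915_drm.h. Assuming a libdrm
 * file descriptor fd and an already-written batch at GTT offset start:
 *
 *	drm_i915_batchbuffer_t batch = {
 *		.start = start,
 *		.used = used_bytes,
 *		.DR1 = 0,
 *		.DR4 = 0,
 *		.num_cliprects = 0,
 *		.cliprects = NULL,
 *	};
 *
 *	drmCommandWrite(fd, DRM_I915_BATCHBUFFER, &batch, sizeof(batch));
 */
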
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		/* translate the bytes-not-copied count into an errno */
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = &dev_priv->render_ring;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->hws_map.handle;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
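
/*
 * Illustrative sketch, not part of the driver: MCHBAR lives at a
 * different config-space offset depending on generation (see the
 * MCHBAR_I915/MCHBAR_I965 defines above), and the "enabled" state also
 * moves -- 915G/915GM parts use bit 28 of the DEVEN register, while
 * other parts use bit 0 of the MCHBAR register itself. The fragment
 * below only restates the check done in intel_setup_mchbar() further
 * down:
 *
 *	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
 *	bool enabled;
 *	u32 temp;
 *
 *	if (IS_I915G(dev) || IS_I915GM(dev)) {
 *		pci_read_config_dword(bridge, DEVEN_REG, &temp);
 *		enabled = !!(temp & DEVEN_MCHBAR_EN);
 *	} else {
 *		pci_read_config_dword(bridge, reg, &temp);
 *		enabled = temp & 1;
 *	}
 */
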
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret = 0;

	if (IS_I965G(dev))
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
		ret = 0;
		goto out;
	}
#endif

	/* Get some space for it */
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		goto out;
	}

	if (IS_I965G(dev))
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
out:
	return ret;
}

/* Setup MCHBAR if possible; remember whether we need to disable it again
 * on teardown. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/**
 * i915_probe_agp - get AGP bootup configuration
 * @pdev: PCI device
 * @aperture_size: returns AGP aperture configured size
 * @preallocated_size: returns size of BIOS preallocated AGP space
 *
 * Since Intel integrated graphics are UMA, the BIOS has to set aside
 * some RAM for the framebuffer at early boot.
 * This code figures out how much was set aside so we can use it for our
 * own purposes.
 */
static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
			  uint32_t *preallocated_size,
			  uint32_t *start)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 tmp = 0;
	unsigned long overhead;
	unsigned long stolen;

	/* Get the fb aperture size and "stolen" memory amount. */
	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);

	*aperture_size = 1024 * 1024;
	*preallocated_size = 1024 * 1024;

	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_INTEL_82830_CGC:
	case PCI_DEVICE_ID_INTEL_82845G_IG:
	case PCI_DEVICE_ID_INTEL_82855GM_IG:
	case PCI_DEVICE_ID_INTEL_82865_IG:
		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
			*aperture_size *= 64;
		else
			*aperture_size *= 128;
		break;
	default:
		/* 9xx supports large sizes, just look at the length */
		*aperture_size = pci_resource_len(dev->pdev, 2);
		break;
	}

	/*
	 * Some of the preallocated space is taken by the GTT
	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
	 */
	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
		overhead = 4096;
	else
		overhead = (*aperture_size / 1024) + 4096;

	if (IS_GEN6(dev)) {
		/* SNB has memory control reg at 0x50.w */
		pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);

		switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
		case INTEL_855_GMCH_GMS_DISABLED:
			DRM_ERROR("video memory is disabled\n");
			return -1;
		case SNB_GMCH_GMS_STOLEN_32M:
			stolen = 32 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			stolen = 64 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			stolen = 96 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			stolen = 128 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			stolen = 160 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			stolen = 192 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			stolen = 224 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			stolen = 256 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			stolen = 288 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			stolen = 320 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			stolen = 352 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			stolen = 384 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			stolen = 416 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			stolen = 448 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			stolen = 480 * 1024 * 1024;
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			stolen = 512 * 1024 * 1024;
			break;
		default:
			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
				  tmp & SNB_GMCH_GMS_STOLEN_MASK);
			return -1;
		}
	} else {
		switch (tmp & INTEL_GMCH_GMS_MASK) {
		case INTEL_855_GMCH_GMS_DISABLED:
			DRM_ERROR("video memory is disabled\n");
			return -1;
		case INTEL_855_GMCH_GMS_STOLEN_1M:
			stolen = 1 * 1024 * 1024;
			break;
		case INTEL_855_GMCH_GMS_STOLEN_4M:
			stolen = 4 * 1024 * 1024;
			break;
		case INTEL_855_GMCH_GMS_STOLEN_8M:
			stolen = 8 * 1024 * 1024;
			break;
		case INTEL_855_GMCH_GMS_STOLEN_16M:
			stolen = 16 * 1024 * 1024;
			break;
		case INTEL_855_GMCH_GMS_STOLEN_32M:
			stolen = 32 * 1024 * 1024;
			break;
		case INTEL_915G_GMCH_GMS_STOLEN_48M:
			stolen = 48 * 1024 * 1024;
			break;
		case INTEL_915G_GMCH_GMS_STOLEN_64M:
			stolen = 64 * 1024 * 1024;
			break;
		case INTEL_GMCH_GMS_STOLEN_128M:
			stolen = 128 * 1024 * 1024;
			break;
		case INTEL_GMCH_GMS_STOLEN_256M:
			stolen = 256 * 1024 * 1024;
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen = 96 * 1024 * 1024;
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen = 160 * 1024 * 1024;
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen = 224 * 1024 * 1024;
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen = 352 * 1024 * 1024;
			break;
		default:
			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
				  tmp & INTEL_GMCH_GMS_MASK);
			return -1;
		}
	}

	*preallocated_size = stolen - overhead;
	*start = overhead;

	return 0;
}

#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)
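
/*
 * Illustrative sketch, not part of the driver: a GTT PTE on these parts
 * packs the page address into bits [31:12], four extra physical address
 * bits into [7:4] on chips that support them (see the masks above), a
 * mapping type in bits [2:1] and a valid bit in bit [0]. Recovering a
 * physical address from an entry, as i915_gtt_to_phys() does below:
 *
 *	static u64 pte_to_phys(u32 entry)
 *	{
 *		return (entry & PTE_ADDRESS_MASK) |
 *		       ((u64)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
 *	}
 */
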
/**
 * i915_gtt_to_phys - take a GTT address and turn it into a physical one
 * @dev: drm device
 * @gtt_addr: address to translate
 *
 * Some chip functions require allocations from stolen space but need the
 * physical address of the memory in question.  We use this routine
 * to get a physical address suitable for register programming from a given
 * GTT address.
 */
static unsigned long i915_gtt_to_phys(struct drm_device *dev,
				      unsigned long gtt_addr)
{
	unsigned long *gtt;
	unsigned long entry, phys;
	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
	int gtt_offset, gtt_size;

	if (IS_I965G(dev)) {
		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
			gtt_offset = 2*1024*1024;
			gtt_size = 2*1024*1024;
		} else {
			gtt_offset = 512*1024;
			gtt_size = 512*1024;
		}
	} else {
		gtt_bar = 3;
		gtt_offset = 0;
		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
	}

	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
			 gtt_size);
	if (!gtt) {
		DRM_ERROR("ioremap of GTT failed\n");
		return 0;
	}

	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));

	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);

	/* Mask out these reserved bits on this hardware. */
	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
	    IS_I945G(dev) || IS_I945GM(dev)) {
		entry &= ~PTE_ADDRESS_MASK_HIGH;
	}

	/* If it's not a mapping type we know, then bail. */
	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
	    (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
		iounmap(gtt);
		return 0;
	}

	if (!(entry & PTE_VALID)) {
		DRM_ERROR("bad GTT entry in stolen space\n");
		iounmap(gtt);
		return 0;
	}

	iounmap(gtt);

	phys = (entry & PTE_ADDRESS_MASK) |
	       ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));

	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);

	return phys;
}

static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}

static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *compressed_llb;
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	/* Leave 1M for line length buffer & misc. */
	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
	if (!compressed_fb) {
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		i915_warn_stolen(dev);
		return;
	}

	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb) {
		i915_warn_stolen(dev);
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		return;
	}

	cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
	if (!cfb_base) {
		DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
		drm_mm_put_block(compressed_fb);
	}

	if (!IS_GM45(dev)) {
		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
						    4096, 0);
		if (!compressed_llb) {
			i915_warn_stolen(dev);
			return;
		}

		compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
		if (!compressed_llb) {
			i915_warn_stolen(dev);
			return;
		}

		ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
		if (!ll_base) {
			DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
			drm_mm_put_block(compressed_fb);
			drm_mm_put_block(compressed_llb);
		}
	}

	dev_priv->cfb_size = size;

	intel_disable_fbc(dev);
	dev_priv->compressed_fb = compressed_fb;

	if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
		  ll_base, size >> 20);
}

static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (!IS_GM45(dev))
		drm_mm_put_block(dev_priv->compressed_llb);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
	} else {
		printk(KERN_ERR "i915: switched off\n");
		i915_suspend(dev, pmm);
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static int i915_load_modeset_init(struct drm_device *dev,
				  unsigned long prealloc_start,
				  unsigned long prealloc_size,
				  unsigned long agp_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fb_bar = IS_I9XX(dev) ? 2 : 0;
	int ret = 0;

	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
		0xff000000;

	/* Basic memrange allocator for stolen space (aka vram) */
	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		goto out;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Try to get an 8M buffer... */
		if (prealloc_size > (9*1024*1024))
			cfb_size = 8*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	ret = intel_init_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret)
		goto destroy_ringbuffer;

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     i915_switcheroo_can_switch);
	if (ret)
		goto destroy_ringbuffer;

	intel_modeset_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto destroy_ringbuffer;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	/*
	 * Initialize the hardware status page IRQ location.
	 */

	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

	intel_fbdev_init(dev);
	drm_kms_helper_poll_init(dev);
	return 0;

destroy_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}

struct v_table {
	u8 vid;
	unsigned long vd; /* in .1 mil */
	unsigned long vm; /* in .1 mil */
	u8 pvid;
};

static struct v_table v_table[] = {
	{ 0, 16125, 15000, 0x7f, },
	{ 1, 16000, 14875, 0x7e, },
	{ 2, 15875, 14750, 0x7d, },
	{ 3, 15750, 14625, 0x7c, },
	{ 4, 15625, 14500, 0x7b, },
	{ 5, 15500, 14375, 0x7a, },
	{ 6, 15375, 14250, 0x79, },
	{ 7, 15250, 14125, 0x78, },
	{ 8, 15125, 14000, 0x77, },
	{ 9, 15000, 13875, 0x76, },
	{ 10, 14875, 13750, 0x75, },
	{ 11, 14750, 13625, 0x74, },
	{ 12, 14625, 13500, 0x73, },
	{ 13, 14500, 13375, 0x72, },
	{ 14, 14375, 13250, 0x71, },
	{ 15, 14250, 13125, 0x70, },
	{ 16, 14125, 13000, 0x6f, },
	{ 17, 14000, 12875, 0x6e, },
	{ 18, 13875, 12750, 0x6d, },
	{ 19, 13750, 12625, 0x6c, },
	{ 20, 13625, 12500, 0x6b, },
	{ 21, 13500, 12375, 0x6a, },
	{ 22, 13375, 12250, 0x69, },
	{ 23, 13250, 12125, 0x68, },
	{ 24, 13125, 12000, 0x67, },
	{ 25, 13000, 11875, 0x66, },
	{ 26, 12875, 11750, 0x65, },
	{ 27, 12750, 11625, 0x64, },
	{ 28, 12625, 11500, 0x63, },
	{ 29, 12500, 11375, 0x62, },
	{ 30, 12375, 11250, 0x61, },
	{ 31, 12250, 11125, 0x60, },
	{ 32, 12125, 11000, 0x5f, },
	{ 33, 12000, 10875, 0x5e, },
	{ 34, 11875, 10750, 0x5d, },
	{ 35, 11750, 10625, 0x5c, },
	{ 36, 11625, 10500, 0x5b, },
	{ 37, 11500, 10375, 0x5a, },
	{ 38, 11375, 10250, 0x59, },
	{ 39, 11250, 10125, 0x58, },
	{ 40, 11125, 10000, 0x57, },
	{ 41, 11000, 9875, 0x56, },
	{ 42, 10875, 9750, 0x55, },
	{ 43, 10750, 9625, 0x54, },
	{ 44, 10625, 9500, 0x53, },
	{ 45, 10500, 9375, 0x52, },
	{ 46, 10375, 9250, 0x51, },
	{ 47, 10250, 9125, 0x50, },
	{ 48, 10125, 9000, 0x4f, },
	{ 49, 10000, 8875, 0x4e, },
	{ 50, 9875, 8750, 0x4d, },
	{ 51, 9750, 8625, 0x4c, },
	{ 52, 9625, 8500, 0x4b, },
	{ 53, 9500, 8375, 0x4a, },
	{ 54, 9375, 8250, 0x49, },
	{ 55, 9250, 8125, 0x48, },
	{ 56, 9125, 8000, 0x47, },
	{ 57, 9000, 7875, 0x46, },
	{ 58, 8875, 7750, 0x45, },
	{ 59, 8750, 7625, 0x44, },
	{ 60, 8625, 7500, 0x43, },
	{ 61, 8500, 7375, 0x42, },
	{ 62, 8375, 7250, 0x41, },
	{ 63, 8250, 7125, 0x40, },
	{ 64, 8125, 7000, 0x3f, },
	{ 65, 8000, 6875, 0x3e, },
	{ 66, 7875, 6750, 0x3d, },
	{ 67, 7750, 6625, 0x3c, },
	{ 68, 7625, 6500, 0x3b, },
	{ 69, 7500, 6375, 0x3a, },
	{ 70, 7375, 6250, 0x39, },
	{ 71, 7250, 6125, 0x38, },
	{ 72, 7125, 6000, 0x37, },
	{ 73, 7000, 5875, 0x36, },
	{ 74, 6875, 5750, 0x35, },
	{ 75, 6750, 5625, 0x34, },
	{ 76, 6625, 5500, 0x33, },
	{ 77, 6500, 5375, 0x32, },
	{ 78, 6375, 5250, 0x31, },
	{ 79, 6250, 5125, 0x30, },
	{ 80, 6125, 5000, 0x2f, },
	{ 81, 6000, 4875, 0x2e, },
	{ 82, 5875, 4750, 0x2d, },
	{ 83, 5750, 4625, 0x2c, },
	{ 84, 5625, 4500, 0x2b, },
	{ 85, 5500, 4375, 0x2a, },
	{ 86, 5375, 4250, 0x29, },
	{ 87, 5250, 4125, 0x28, },
	{ 88, 5125, 4000, 0x27, },
	{ 89, 5000, 3875, 0x26, },
	{ 90, 4875, 3750, 0x25, },
	{ 91, 4750, 3625, 0x24, },
	{ 92, 4625, 3500, 0x23, },
	{ 93, 4500, 3375, 0x22, },
	{ 94, 4375, 3250, 0x21, },
	{ 95, 4250, 3125, 0x20, },
	{ 96, 4125, 3000, 0x1f, },
	{ 97, 4125, 3000, 0x1e, },
	{ 98, 4125, 3000, 0x1d, },
	{ 99, 4125, 3000, 0x1c, },
	{ 100, 4125, 3000, 0x1b, },
	{ 101, 4125, 3000, 0x1a, },
	{ 102, 4125, 3000, 0x19, },
	{ 103, 4125, 3000, 0x18, },
	{ 104, 4125, 3000, 0x17, },
	{ 105, 4125, 3000, 0x16, },
	{ 106, 4125, 3000, 0x15, },
	{ 107, 4125, 3000, 0x14, },
	{ 108, 4125, 3000, 0x13, },
	{ 109, 4125, 3000, 0x12, },
	{ 110, 4125, 3000, 0x11, },
	{ 111, 4125, 3000, 0x10, },
	{ 112, 4125, 3000, 0x0f, },
	{ 113, 4125, 3000, 0x0e, },
	{ 114, 4125, 3000, 0x0d, },
	{ 115, 4125, 3000, 0x0c, },
	{ 116, 4125, 3000, 0x0b, },
	{ 117, 4125, 3000, 0x0a, },
	{ 118, 4125, 3000, 0x09, },
	{ 119, 4125, 3000, 0x08, },
	{ 120, 1125, 0, 0x07, },
	{ 121, 1000, 0, 0x06, },
	{ 122, 875, 0, 0x05, },
	{ 123, 750, 0, 0x04, },
	{ 124, 625, 0, 0x03, },
	{ 125, 500, 0, 0x02, },
	{ 126, 375, 0, 0x01, },
	{ 127, 0, 0, 0x00, },
};

struct cparams {
	int i;
	int t;
	int m;
	int c;
};

static struct cparams cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
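
/*
 * Illustrative sketch, not part of the driver: i915_chipset_val() below
 * turns raw energy-counter deltas into a power estimate by looking up a
 * per-configuration slope m and offset c in cparams[] (keyed on the c_m
 * and r_t values derived from the FSB/memory frequencies above), then
 * scaling the counter delta by the elapsed milliseconds, roughly:
 *
 *	power = (m * (delta_count / delta_ms) + c) / 10;
 *
 * The constants are empirical; only the table above defines them.
 */
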

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	/* div_u64() returns the quotient; it must be assigned back */
	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	return ret;
}

unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	unsigned long val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(v_table); i++) {
		if (v_table[i].pvid == pxvid) {
			if (IS_MOBILE(dev_priv->dev))
				val = v_table[i].vm;
			else
				val = v_table[i].vd;
		}
	}

	return val;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}

/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
DEFINE_SPINLOCK(mchdev_lock);
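
/*
 * Illustrative sketch, not part of the driver: the exported helpers below
 * (i915_read_mch_val, i915_gpu_raise, i915_gpu_lower, i915_gpu_busy,
 * i915_gpu_turbo_disable) form the cross-module interface consumed by
 * the IPS driver. A hypothetical caller polling them might look like:
 *
 *	unsigned long mch = i915_read_mch_val();
 *
 *	if (headroom_available(mch))	<- hypothetical policy check
 *		i915_gpu_raise();
 *	else
 *		i915_gpu_lower();
 *
 * All of them take mchdev_lock and bail out gracefully (returning 0 or
 * false) when no i915 device has registered itself in i915_mch_dev.
 */
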
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = i915_chipset_val(dev_priv);
	graphics_val = i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay > dev_priv->fmax)
		dev_priv->max_delay--;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay < dev_priv->min_delay)
		dev_priv->max_delay++;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = false;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	ret = dev_priv->busy;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	resource_size_t base, size;
	int ret = 0, mmio_bar;
	uint32_t agp_size, prealloc_size, prealloc_start;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = (struct intel_device_info *) flags;

	/* Add register map (needed for suspend/resume) */
	mmio_bar = IS_I9XX(dev) ? 0 : 1;
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	dev_priv->regs = ioremap(base, size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base,
				     dev->agp->agp_info.aper_size * 1024*1024);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 dev->agp->agp_info.aper_size *
					 1024 * 1024,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
	if (ret)
		goto out_iomapfree;

	dev_priv->wq = create_singlethread_workqueue("i915");
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_iomapfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	if (prealloc_size > agp_size * 3 / 4) {
		DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
			  "memory stolen.\n",
			  prealloc_size / 1024, agp_size / 1024);
		DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
			  "updating the BIOS to fix).\n");
		dev_priv->has_gem = 0;
	}
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	resource_size_t base, size;
	int ret = 0, mmio_bar;
	uint32_t agp_size, prealloc_size, prealloc_start;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = (struct intel_device_info *) flags;

	/* Add register map (needed for suspend/resume) */
	mmio_bar = IS_I9XX(dev) ? 0 : 1;
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	dev_priv->regs = ioremap(base, size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base,
				     dev->agp->agp_info.aper_size * 1024*1024);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 dev->agp->agp_info.aper_size *
					 1024 * 1024,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
	if (ret)
		goto out_iomapfree;

	dev_priv->wq = create_singlethread_workqueue("i915");
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_iomapfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	if (prealloc_size > agp_size * 3 / 4) {
		DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
			  "memory stolen.\n",
			  prealloc_size / 1024, agp_size / 1024);
		DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
			  "updating the BIOS to fix).\n");
		dev_priv->has_gem = 0;
	}

	if (dev_priv->has_gem == 0 &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
		ret = -ENODEV;
		goto out_iomapfree;
	}

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0)
			goto out_workqueue_free;
	}

	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_IRONLAKE(dev))
		i915_ironlake_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->user_irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	dev_priv->trace_irq_seqno = 0;

	ret = drm_vblank_init(dev, I915_NUM_PIPE);
	if (ret) {
		(void) i915_driver_unload(dev);
		return ret;
	}

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev, prealloc_start,
					     prealloc_size, agp_size);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_workqueue_free;
		}
	}

	/* Must be done after probing outputs */
	intel_opregion_init(dev, 0);

	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long) dev);

	spin_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

	return 0;

out_workqueue_free:
	destroy_workqueue(dev_priv->wq);
out_iomapfree:
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	iounmap(dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}
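/*
 * A sketch of the unwind idiom used above (generic names, not from this
 * file): every failure point jumps to the label that releases what has
 * been acquired so far, and the labels run in exact reverse order of
 * acquisition:
 *
 *	a = acquire_a();
 *	if (a == NULL)
 *		return -ENOMEM;
 *	b = acquire_b();
 *	if (b == NULL) {
 *		ret = -EIO;
 *		goto free_a;
 *	}
 *	return 0;
 * free_a:
 *	release_a(a);
 *	return ret;
 *
 * The drm_vblank_init() failure path is the one exception: by that point
 * enough state exists that it simply calls i915_driver_unload().
 */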
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_destroy_error_state(dev);

	spin_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock(&mchdev_lock);

	destroy_workqueue(dev_priv->wq);
	del_timer_sync(&dev_priv->hangcheck_timer);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			kfree(dev_priv->child_dev);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}
		drm_irq_uninstall(dev);
		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);

	intel_opregion_free(dev, 0);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		i915_gem_free_all_phys_object(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);

		intel_cleanup_overlay(dev);
	}

	intel_teardown_mchbar(dev);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv;

	DRM_DEBUG_DRIVER("\n");
	i915_file_priv = kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
	if (!i915_file_priv)
		return -ENOMEM;

	file_priv->driver_priv = i915_file_priv;

	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);

	return 0;
}
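/*
 * The per-client flow, roughly: i915_driver_open() above and the
 * preclose/postclose hooks below bracket the life of one DRM file
 * descriptor:
 *
 *	open("/dev/dri/card0")	-> i915_driver_open()
 *				   (allocate driver_priv with an empty
 *				    mm.request_list)
 *	ioctls			-> requests tracked on that list
 *	close(fd)		-> i915_driver_preclose()
 *				   (drop GEM and memory-heap state)
 *				-> i915_driver_postclose()
 *				   (kfree the driver_priv)
 */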
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_helper_restore();
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	kfree(i915_file_priv);
}
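/*
 * For illustration: each entry in the table below is reached from
 * userspace through libdrm. The DRM_I915_GETPARAM entry, for example,
 * would typically be exercised like this (userspace code, not part of
 * the kernel; the helper name is hypothetical):
 *
 *	#include <xf86drm.h>
 *	#include <i915_drm.h>
 *
 *	static int i915_has_gem(int fd)
 *	{
 *		drm_i915_getparam_t gp;
 *		int value = 0;
 *
 *		gp.param = I915_PARAM_HAS_GEM;
 *		gp.value = &value;
 *		if (drmCommandWriteRead(fd, DRM_I915_GETPARAM,
 *					&gp, sizeof(gp)))
 *			return 0;
 *		return value;
 *	}
 *
 * The access flags in each entry (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY,
 * DRM_UNLOCKED) are checked by the DRM core before the handler runs.
 */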
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}
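/*
 * For context: the DRM core consults the hook above through
 * drm_device_is_agp() while filling in the device, roughly:
 *
 *	if (drm_core_has_AGP(dev) && drm_device_is_agp(dev))
 *		dev->agp = drm_agp_init(dev);
 *
 * so answering 1 unconditionally gives even PCIe parts an AGP structure,
 * which i915_driver_load() depends on (dev->agp->base and friends).
 */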