/* i915_dma.c revision b52eb4dcab23fe0c52a437276258e0afcf913ef5 */
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
47 */ 48static int i915_init_phys_hws(struct drm_device *dev) 49{ 50 drm_i915_private_t *dev_priv = dev->dev_private; 51 /* Program Hardware Status Page */ 52 dev_priv->status_page_dmah = 53 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); 54 55 if (!dev_priv->status_page_dmah) { 56 DRM_ERROR("Can not allocate hardware status page\n"); 57 return -ENOMEM; 58 } 59 dev_priv->render_ring.status_page.page_addr 60 = dev_priv->status_page_dmah->vaddr; 61 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; 62 63 memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); 64 65 if (IS_I965G(dev)) 66 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & 67 0xf0; 68 69 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 70 DRM_DEBUG_DRIVER("Enabled hardware status page\n"); 71 return 0; 72} 73 74/** 75 * Frees the hardware status page, whether it's a physical address or a virtual 76 * address set up by the X Server. 77 */ 78static void i915_free_hws(struct drm_device *dev) 79{ 80 drm_i915_private_t *dev_priv = dev->dev_private; 81 if (dev_priv->status_page_dmah) { 82 drm_pci_free(dev, dev_priv->status_page_dmah); 83 dev_priv->status_page_dmah = NULL; 84 } 85 86 if (dev_priv->render_ring.status_page.gfx_addr) { 87 dev_priv->render_ring.status_page.gfx_addr = 0; 88 drm_core_ioremapfree(&dev_priv->hws_map, dev); 89 } 90 91 /* Need to rewrite hardware status page */ 92 I915_WRITE(HWS_PGA, 0x1ffff000); 93} 94 95void i915_kernel_lost_context(struct drm_device * dev) 96{ 97 drm_i915_private_t *dev_priv = dev->dev_private; 98 struct drm_i915_master_private *master_priv; 99 struct intel_ring_buffer *ring = &dev_priv->render_ring; 100 101 /* 102 * We should never lose context on the ring with modesetting 103 * as we don't expose it to userspace 104 */ 105 if (drm_core_check_feature(dev, DRIVER_MODESET)) 106 return; 107 108 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 109 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 110 ring->space = ring->head - (ring->tail + 
8); 111 if (ring->space < 0) 112 ring->space += ring->size; 113 114 if (!dev->primary->master) 115 return; 116 117 master_priv = dev->primary->master->driver_priv; 118 if (ring->head == ring->tail && master_priv->sarea_priv) 119 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; 120} 121 122static int i915_dma_cleanup(struct drm_device * dev) 123{ 124 drm_i915_private_t *dev_priv = dev->dev_private; 125 /* Make sure interrupts are disabled here because the uninstall ioctl 126 * may not have been called from userspace and after dev_private 127 * is freed, it's too late. 128 */ 129 if (dev->irq_enabled) 130 drm_irq_uninstall(dev); 131 132 mutex_lock(&dev->struct_mutex); 133 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 134 if (HAS_BSD(dev)) 135 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 136 mutex_unlock(&dev->struct_mutex); 137 138 /* Clear the HWS virtual address at teardown */ 139 if (I915_NEED_GFX_HWS(dev)) 140 i915_free_hws(dev); 141 142 return 0; 143} 144 145static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) 146{ 147 drm_i915_private_t *dev_priv = dev->dev_private; 148 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 149 150 master_priv->sarea = drm_getsarea(dev); 151 if (master_priv->sarea) { 152 master_priv->sarea_priv = (drm_i915_sarea_t *) 153 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); 154 } else { 155 DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); 156 } 157 158 if (init->ring_size != 0) { 159 if (dev_priv->render_ring.gem_object != NULL) { 160 i915_dma_cleanup(dev); 161 DRM_ERROR("Client tried to initialize ringbuffer in " 162 "GEM mode\n"); 163 return -EINVAL; 164 } 165 166 dev_priv->render_ring.size = init->ring_size; 167 168 dev_priv->render_ring.map.offset = init->ring_start; 169 dev_priv->render_ring.map.size = init->ring_size; 170 dev_priv->render_ring.map.type = 0; 171 dev_priv->render_ring.map.flags = 0; 172 
dev_priv->render_ring.map.mtrr = 0; 173 174 drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); 175 176 if (dev_priv->render_ring.map.handle == NULL) { 177 i915_dma_cleanup(dev); 178 DRM_ERROR("can not ioremap virtual address for" 179 " ring buffer\n"); 180 return -ENOMEM; 181 } 182 } 183 184 dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; 185 186 dev_priv->cpp = init->cpp; 187 dev_priv->back_offset = init->back_offset; 188 dev_priv->front_offset = init->front_offset; 189 dev_priv->current_page = 0; 190 if (master_priv->sarea_priv) 191 master_priv->sarea_priv->pf_current_page = 0; 192 193 /* Allow hardware batchbuffers unless told otherwise. 194 */ 195 dev_priv->allow_batchbuffer = 1; 196 197 return 0; 198} 199 200static int i915_dma_resume(struct drm_device * dev) 201{ 202 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 203 204 struct intel_ring_buffer *ring; 205 DRM_DEBUG_DRIVER("%s\n", __func__); 206 207 ring = &dev_priv->render_ring; 208 209 if (ring->map.handle == NULL) { 210 DRM_ERROR("can not ioremap virtual address for" 211 " ring buffer\n"); 212 return -ENOMEM; 213 } 214 215 /* Program Hardware Status Page */ 216 if (!ring->status_page.page_addr) { 217 DRM_ERROR("Can not find hardware status page\n"); 218 return -EINVAL; 219 } 220 DRM_DEBUG_DRIVER("hw status page @ %p\n", 221 ring->status_page.page_addr); 222 if (ring->status_page.gfx_addr != 0) 223 ring->setup_status_page(dev, ring); 224 else 225 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 226 227 DRM_DEBUG_DRIVER("Enabled hardware status page\n"); 228 229 return 0; 230} 231 232static int i915_dma_init(struct drm_device *dev, void *data, 233 struct drm_file *file_priv) 234{ 235 drm_i915_init_t *init = data; 236 int retcode = 0; 237 238 switch (init->func) { 239 case I915_INIT_DMA: 240 retcode = i915_initialize(dev, init); 241 break; 242 case I915_CLEANUP_DMA: 243 retcode = i915_dma_cleanup(dev); 244 break; 245 case I915_RESUME_DMA: 246 retcode = 
i915_dma_resume(dev); 247 break; 248 default: 249 retcode = -EINVAL; 250 break; 251 } 252 253 return retcode; 254} 255 256/* Implement basically the same security restrictions as hardware does 257 * for MI_BATCH_NON_SECURE. These can be made stricter at any time. 258 * 259 * Most of the calculations below involve calculating the size of a 260 * particular instruction. It's important to get the size right as 261 * that tells us where the next instruction to check is. Any illegal 262 * instruction detected will be given a size of zero, which is a 263 * signal to abort the rest of the buffer. 264 */ 265static int do_validate_cmd(int cmd) 266{ 267 switch (((cmd >> 29) & 0x7)) { 268 case 0x0: 269 switch ((cmd >> 23) & 0x3f) { 270 case 0x0: 271 return 1; /* MI_NOOP */ 272 case 0x4: 273 return 1; /* MI_FLUSH */ 274 default: 275 return 0; /* disallow everything else */ 276 } 277 break; 278 case 0x1: 279 return 0; /* reserved */ 280 case 0x2: 281 return (cmd & 0xff) + 2; /* 2d commands */ 282 case 0x3: 283 if (((cmd >> 24) & 0x1f) <= 0x18) 284 return 1; 285 286 switch ((cmd >> 24) & 0x1f) { 287 case 0x1c: 288 return 1; 289 case 0x1d: 290 switch ((cmd >> 16) & 0xff) { 291 case 0x3: 292 return (cmd & 0x1f) + 2; 293 case 0x4: 294 return (cmd & 0xf) + 2; 295 default: 296 return (cmd & 0xffff) + 2; 297 } 298 case 0x1e: 299 if (cmd & (1 << 23)) 300 return (cmd & 0xffff) + 1; 301 else 302 return 1; 303 case 0x1f: 304 if ((cmd & (1 << 23)) == 0) /* inline vertices */ 305 return (cmd & 0x1ffff) + 2; 306 else if (cmd & (1 << 17)) /* indirect random */ 307 if ((cmd & 0xffff) == 0) 308 return 0; /* unknown length, too hard */ 309 else 310 return (((cmd & 0xffff) + 1) / 2) + 1; 311 else 312 return 2; /* indirect sequential */ 313 default: 314 return 0; 315 } 316 default: 317 return 0; 318 } 319 320 return 0; 321} 322 323static int validate_cmd(int cmd) 324{ 325 int ret = do_validate_cmd(cmd); 326 327/* printk("validate_cmd( %x ): %d\n", cmd, ret); */ 328 329 return ret; 330} 331 
332static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) 333{ 334 drm_i915_private_t *dev_priv = dev->dev_private; 335 int i; 336 337 if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) 338 return -EINVAL; 339 340 BEGIN_LP_RING((dwords+1)&~1); 341 342 for (i = 0; i < dwords;) { 343 int cmd, sz; 344 345 cmd = buffer[i]; 346 347 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 348 return -EINVAL; 349 350 OUT_RING(cmd); 351 352 while (++i, --sz) { 353 OUT_RING(buffer[i]); 354 } 355 } 356 357 if (dwords & 1) 358 OUT_RING(0); 359 360 ADVANCE_LP_RING(); 361 362 return 0; 363} 364 365int 366i915_emit_box(struct drm_device *dev, 367 struct drm_clip_rect *boxes, 368 int i, int DR1, int DR4) 369{ 370 struct drm_clip_rect box = boxes[i]; 371 372 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 373 DRM_ERROR("Bad box %d,%d..%d,%d\n", 374 box.x1, box.y1, box.x2, box.y2); 375 return -EINVAL; 376 } 377 378 if (IS_I965G(dev)) { 379 BEGIN_LP_RING(4); 380 OUT_RING(GFX_OP_DRAWRECT_INFO_I965); 381 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); 382 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); 383 OUT_RING(DR4); 384 ADVANCE_LP_RING(); 385 } else { 386 BEGIN_LP_RING(6); 387 OUT_RING(GFX_OP_DRAWRECT_INFO); 388 OUT_RING(DR1); 389 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); 390 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); 391 OUT_RING(DR4); 392 OUT_RING(0); 393 ADVANCE_LP_RING(); 394 } 395 396 return 0; 397} 398 399/* XXX: Emitting the counter should really be moved to part of the IRQ 400 * emit. 
For now, do it in both places: 401 */ 402 403static void i915_emit_breadcrumb(struct drm_device *dev) 404{ 405 drm_i915_private_t *dev_priv = dev->dev_private; 406 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 407 408 dev_priv->counter++; 409 if (dev_priv->counter > 0x7FFFFFFFUL) 410 dev_priv->counter = 0; 411 if (master_priv->sarea_priv) 412 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 413 414 BEGIN_LP_RING(4); 415 OUT_RING(MI_STORE_DWORD_INDEX); 416 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 417 OUT_RING(dev_priv->counter); 418 OUT_RING(0); 419 ADVANCE_LP_RING(); 420} 421 422static int i915_dispatch_cmdbuffer(struct drm_device * dev, 423 drm_i915_cmdbuffer_t *cmd, 424 struct drm_clip_rect *cliprects, 425 void *cmdbuf) 426{ 427 int nbox = cmd->num_cliprects; 428 int i = 0, count, ret; 429 430 if (cmd->sz & 0x3) { 431 DRM_ERROR("alignment"); 432 return -EINVAL; 433 } 434 435 i915_kernel_lost_context(dev); 436 437 count = nbox ? nbox : 1; 438 439 for (i = 0; i < count; i++) { 440 if (i < nbox) { 441 ret = i915_emit_box(dev, cliprects, i, 442 cmd->DR1, cmd->DR4); 443 if (ret) 444 return ret; 445 } 446 447 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); 448 if (ret) 449 return ret; 450 } 451 452 i915_emit_breadcrumb(dev); 453 return 0; 454} 455 456static int i915_dispatch_batchbuffer(struct drm_device * dev, 457 drm_i915_batchbuffer_t * batch, 458 struct drm_clip_rect *cliprects) 459{ 460 int nbox = batch->num_cliprects; 461 int i = 0, count; 462 463 if ((batch->start | batch->used) & 0x7) { 464 DRM_ERROR("alignment"); 465 return -EINVAL; 466 } 467 468 i915_kernel_lost_context(dev); 469 470 count = nbox ? 
nbox : 1; 471 472 for (i = 0; i < count; i++) { 473 if (i < nbox) { 474 int ret = i915_emit_box(dev, cliprects, i, 475 batch->DR1, batch->DR4); 476 if (ret) 477 return ret; 478 } 479 480 if (!IS_I830(dev) && !IS_845G(dev)) { 481 BEGIN_LP_RING(2); 482 if (IS_I965G(dev)) { 483 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); 484 OUT_RING(batch->start); 485 } else { 486 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); 487 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 488 } 489 ADVANCE_LP_RING(); 490 } else { 491 BEGIN_LP_RING(4); 492 OUT_RING(MI_BATCH_BUFFER); 493 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 494 OUT_RING(batch->start + batch->used - 4); 495 OUT_RING(0); 496 ADVANCE_LP_RING(); 497 } 498 } 499 500 i915_emit_breadcrumb(dev); 501 502 return 0; 503} 504 505static int i915_dispatch_flip(struct drm_device * dev) 506{ 507 drm_i915_private_t *dev_priv = dev->dev_private; 508 struct drm_i915_master_private *master_priv = 509 dev->primary->master->driver_priv; 510 511 if (!master_priv->sarea_priv) 512 return -EINVAL; 513 514 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", 515 __func__, 516 dev_priv->current_page, 517 master_priv->sarea_priv->pf_current_page); 518 519 i915_kernel_lost_context(dev); 520 521 BEGIN_LP_RING(2); 522 OUT_RING(MI_FLUSH | MI_READ_FLUSH); 523 OUT_RING(0); 524 ADVANCE_LP_RING(); 525 526 BEGIN_LP_RING(6); 527 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 528 OUT_RING(0); 529 if (dev_priv->current_page == 0) { 530 OUT_RING(dev_priv->back_offset); 531 dev_priv->current_page = 1; 532 } else { 533 OUT_RING(dev_priv->front_offset); 534 dev_priv->current_page = 0; 535 } 536 OUT_RING(0); 537 ADVANCE_LP_RING(); 538 539 BEGIN_LP_RING(2); 540 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); 541 OUT_RING(0); 542 ADVANCE_LP_RING(); 543 544 master_priv->sarea_priv->last_enqueue = dev_priv->counter++; 545 546 BEGIN_LP_RING(4); 547 OUT_RING(MI_STORE_DWORD_INDEX); 548 OUT_RING(I915_BREADCRUMB_INDEX << 
MI_STORE_DWORD_INDEX_SHIFT); 549 OUT_RING(dev_priv->counter); 550 OUT_RING(0); 551 ADVANCE_LP_RING(); 552 553 master_priv->sarea_priv->pf_current_page = dev_priv->current_page; 554 return 0; 555} 556 557static int i915_quiescent(struct drm_device * dev) 558{ 559 drm_i915_private_t *dev_priv = dev->dev_private; 560 561 i915_kernel_lost_context(dev); 562 return intel_wait_ring_buffer(dev, &dev_priv->render_ring, 563 dev_priv->render_ring.size - 8); 564} 565 566static int i915_flush_ioctl(struct drm_device *dev, void *data, 567 struct drm_file *file_priv) 568{ 569 int ret; 570 571 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 572 573 mutex_lock(&dev->struct_mutex); 574 ret = i915_quiescent(dev); 575 mutex_unlock(&dev->struct_mutex); 576 577 return ret; 578} 579 580static int i915_batchbuffer(struct drm_device *dev, void *data, 581 struct drm_file *file_priv) 582{ 583 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 584 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 585 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 586 master_priv->sarea_priv; 587 drm_i915_batchbuffer_t *batch = data; 588 int ret; 589 struct drm_clip_rect *cliprects = NULL; 590 591 if (!dev_priv->allow_batchbuffer) { 592 DRM_ERROR("Batchbuffer ioctl disabled\n"); 593 return -EINVAL; 594 } 595 596 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", 597 batch->start, batch->used, batch->num_cliprects); 598 599 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 600 601 if (batch->num_cliprects < 0) 602 return -EINVAL; 603 604 if (batch->num_cliprects) { 605 cliprects = kcalloc(batch->num_cliprects, 606 sizeof(struct drm_clip_rect), 607 GFP_KERNEL); 608 if (cliprects == NULL) 609 return -ENOMEM; 610 611 ret = copy_from_user(cliprects, batch->cliprects, 612 batch->num_cliprects * 613 sizeof(struct drm_clip_rect)); 614 if (ret != 0) 615 goto fail_free; 616 } 617 618 mutex_lock(&dev->struct_mutex); 619 ret = i915_dispatch_batchbuffer(dev, 
batch, cliprects); 620 mutex_unlock(&dev->struct_mutex); 621 622 if (sarea_priv) 623 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 624 625fail_free: 626 kfree(cliprects); 627 628 return ret; 629} 630 631static int i915_cmdbuffer(struct drm_device *dev, void *data, 632 struct drm_file *file_priv) 633{ 634 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 635 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 636 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 637 master_priv->sarea_priv; 638 drm_i915_cmdbuffer_t *cmdbuf = data; 639 struct drm_clip_rect *cliprects = NULL; 640 void *batch_data; 641 int ret; 642 643 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 644 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 645 646 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 647 648 if (cmdbuf->num_cliprects < 0) 649 return -EINVAL; 650 651 batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); 652 if (batch_data == NULL) 653 return -ENOMEM; 654 655 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); 656 if (ret != 0) 657 goto fail_batch_free; 658 659 if (cmdbuf->num_cliprects) { 660 cliprects = kcalloc(cmdbuf->num_cliprects, 661 sizeof(struct drm_clip_rect), GFP_KERNEL); 662 if (cliprects == NULL) { 663 ret = -ENOMEM; 664 goto fail_batch_free; 665 } 666 667 ret = copy_from_user(cliprects, cmdbuf->cliprects, 668 cmdbuf->num_cliprects * 669 sizeof(struct drm_clip_rect)); 670 if (ret != 0) 671 goto fail_clip_free; 672 } 673 674 mutex_lock(&dev->struct_mutex); 675 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); 676 mutex_unlock(&dev->struct_mutex); 677 if (ret) { 678 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 679 goto fail_clip_free; 680 } 681 682 if (sarea_priv) 683 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 684 685fail_clip_free: 686 kfree(cliprects); 687fail_batch_free: 688 kfree(batch_data); 689 690 return ret; 691} 692 693static int i915_flip_bufs(struct 
drm_device *dev, void *data, 694 struct drm_file *file_priv) 695{ 696 int ret; 697 698 DRM_DEBUG_DRIVER("%s\n", __func__); 699 700 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 701 702 mutex_lock(&dev->struct_mutex); 703 ret = i915_dispatch_flip(dev); 704 mutex_unlock(&dev->struct_mutex); 705 706 return ret; 707} 708 709static int i915_getparam(struct drm_device *dev, void *data, 710 struct drm_file *file_priv) 711{ 712 drm_i915_private_t *dev_priv = dev->dev_private; 713 drm_i915_getparam_t *param = data; 714 int value; 715 716 if (!dev_priv) { 717 DRM_ERROR("called with no initialization\n"); 718 return -EINVAL; 719 } 720 721 switch (param->param) { 722 case I915_PARAM_IRQ_ACTIVE: 723 value = dev->pdev->irq ? 1 : 0; 724 break; 725 case I915_PARAM_ALLOW_BATCHBUFFER: 726 value = dev_priv->allow_batchbuffer ? 1 : 0; 727 break; 728 case I915_PARAM_LAST_DISPATCH: 729 value = READ_BREADCRUMB(dev_priv); 730 break; 731 case I915_PARAM_CHIPSET_ID: 732 value = dev->pci_device; 733 break; 734 case I915_PARAM_HAS_GEM: 735 value = dev_priv->has_gem; 736 break; 737 case I915_PARAM_NUM_FENCES_AVAIL: 738 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 739 break; 740 case I915_PARAM_HAS_OVERLAY: 741 value = dev_priv->overlay ? 
1 : 0; 742 break; 743 case I915_PARAM_HAS_PAGEFLIPPING: 744 value = 1; 745 break; 746 case I915_PARAM_HAS_EXECBUF2: 747 /* depends on GEM */ 748 value = dev_priv->has_gem; 749 break; 750 case I915_PARAM_HAS_BSD: 751 value = HAS_BSD(dev); 752 break; 753 default: 754 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 755 param->param); 756 return -EINVAL; 757 } 758 759 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { 760 DRM_ERROR("DRM_COPY_TO_USER failed\n"); 761 return -EFAULT; 762 } 763 764 return 0; 765} 766 767static int i915_setparam(struct drm_device *dev, void *data, 768 struct drm_file *file_priv) 769{ 770 drm_i915_private_t *dev_priv = dev->dev_private; 771 drm_i915_setparam_t *param = data; 772 773 if (!dev_priv) { 774 DRM_ERROR("called with no initialization\n"); 775 return -EINVAL; 776 } 777 778 switch (param->param) { 779 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 780 break; 781 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 782 dev_priv->tex_lru_log_granularity = param->value; 783 break; 784 case I915_SETPARAM_ALLOW_BATCHBUFFER: 785 dev_priv->allow_batchbuffer = param->value; 786 break; 787 case I915_SETPARAM_NUM_USED_FENCES: 788 if (param->value > dev_priv->num_fence_regs || 789 param->value < 0) 790 return -EINVAL; 791 /* Userspace can use first N regs */ 792 dev_priv->fence_reg_start = param->value; 793 break; 794 default: 795 DRM_DEBUG_DRIVER("unknown parameter %d\n", 796 param->param); 797 return -EINVAL; 798 } 799 800 return 0; 801} 802 803static int i915_set_status_page(struct drm_device *dev, void *data, 804 struct drm_file *file_priv) 805{ 806 drm_i915_private_t *dev_priv = dev->dev_private; 807 drm_i915_hws_addr_t *hws = data; 808 struct intel_ring_buffer *ring = &dev_priv->render_ring; 809 810 if (!I915_NEED_GFX_HWS(dev)) 811 return -EINVAL; 812 813 if (!dev_priv) { 814 DRM_ERROR("called with no initialization\n"); 815 return -EINVAL; 816 } 817 818 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 819 WARN(1, "tried to set status page when 
mode setting active\n"); 820 return 0; 821 } 822 823 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); 824 825 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); 826 827 dev_priv->hws_map.offset = dev->agp->base + hws->addr; 828 dev_priv->hws_map.size = 4*1024; 829 dev_priv->hws_map.type = 0; 830 dev_priv->hws_map.flags = 0; 831 dev_priv->hws_map.mtrr = 0; 832 833 drm_core_ioremap_wc(&dev_priv->hws_map, dev); 834 if (dev_priv->hws_map.handle == NULL) { 835 i915_dma_cleanup(dev); 836 ring->status_page.gfx_addr = 0; 837 DRM_ERROR("can not ioremap virtual address for" 838 " G33 hw status page\n"); 839 return -ENOMEM; 840 } 841 ring->status_page.page_addr = dev_priv->hws_map.handle; 842 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 843 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); 844 845 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", 846 ring->status_page.gfx_addr); 847 DRM_DEBUG_DRIVER("load hws at %p\n", 848 ring->status_page.page_addr); 849 return 0; 850} 851 852static int i915_get_bridge_dev(struct drm_device *dev) 853{ 854 struct drm_i915_private *dev_priv = dev->dev_private; 855 856 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); 857 if (!dev_priv->bridge_dev) { 858 DRM_ERROR("bridge device not found\n"); 859 return -1; 860 } 861 return 0; 862} 863 864#define MCHBAR_I915 0x44 865#define MCHBAR_I965 0x48 866#define MCHBAR_SIZE (4*4096) 867 868#define DEVEN_REG 0x54 869#define DEVEN_MCHBAR_EN (1 << 28) 870 871/* Allocate space for the MCH regs if needed, return nonzero on error */ 872static int 873intel_alloc_mchbar_resource(struct drm_device *dev) 874{ 875 drm_i915_private_t *dev_priv = dev->dev_private; 876 int reg = IS_I965G(dev) ? 
MCHBAR_I965 : MCHBAR_I915; 877 u32 temp_lo, temp_hi = 0; 878 u64 mchbar_addr; 879 int ret = 0; 880 881 if (IS_I965G(dev)) 882 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 883 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); 884 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 885 886 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 887#ifdef CONFIG_PNP 888 if (mchbar_addr && 889 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { 890 ret = 0; 891 goto out; 892 } 893#endif 894 895 /* Get some space for it */ 896 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, 897 MCHBAR_SIZE, MCHBAR_SIZE, 898 PCIBIOS_MIN_MEM, 899 0, pcibios_align_resource, 900 dev_priv->bridge_dev); 901 if (ret) { 902 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); 903 dev_priv->mch_res.start = 0; 904 goto out; 905 } 906 907 if (IS_I965G(dev)) 908 pci_write_config_dword(dev_priv->bridge_dev, reg + 4, 909 upper_32_bits(dev_priv->mch_res.start)); 910 911 pci_write_config_dword(dev_priv->bridge_dev, reg, 912 lower_32_bits(dev_priv->mch_res.start)); 913out: 914 return ret; 915} 916 917/* Setup MCHBAR if possible, return true if we should disable it again */ 918static void 919intel_setup_mchbar(struct drm_device *dev) 920{ 921 drm_i915_private_t *dev_priv = dev->dev_private; 922 int mchbar_reg = IS_I965G(dev) ? 
MCHBAR_I965 : MCHBAR_I915; 923 u32 temp; 924 bool enabled; 925 926 dev_priv->mchbar_need_disable = false; 927 928 if (IS_I915G(dev) || IS_I915GM(dev)) { 929 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); 930 enabled = !!(temp & DEVEN_MCHBAR_EN); 931 } else { 932 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 933 enabled = temp & 1; 934 } 935 936 /* If it's already enabled, don't have to do anything */ 937 if (enabled) 938 return; 939 940 if (intel_alloc_mchbar_resource(dev)) 941 return; 942 943 dev_priv->mchbar_need_disable = true; 944 945 /* Space is allocated or reserved, so enable it. */ 946 if (IS_I915G(dev) || IS_I915GM(dev)) { 947 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, 948 temp | DEVEN_MCHBAR_EN); 949 } else { 950 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 951 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); 952 } 953} 954 955static void 956intel_teardown_mchbar(struct drm_device *dev) 957{ 958 drm_i915_private_t *dev_priv = dev->dev_private; 959 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; 960 u32 temp; 961 962 if (dev_priv->mchbar_need_disable) { 963 if (IS_I915G(dev) || IS_I915GM(dev)) { 964 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); 965 temp &= ~DEVEN_MCHBAR_EN; 966 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); 967 } else { 968 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 969 temp &= ~1; 970 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); 971 } 972 } 973 974 if (dev_priv->mch_res.start) 975 release_resource(&dev_priv->mch_res); 976} 977 978/** 979 * i915_probe_agp - get AGP bootup configuration 980 * @pdev: PCI device 981 * @aperture_size: returns AGP aperture configured size 982 * @preallocated_size: returns size of BIOS preallocated AGP space 983 * 984 * Since Intel integrated graphics are UMA, the BIOS has to set aside 985 * some RAM for the framebuffer at early boot. 
This code figures out 986 * how much was set aside so we can use it for our own purposes. 987 */ 988static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, 989 uint32_t *preallocated_size, 990 uint32_t *start) 991{ 992 struct drm_i915_private *dev_priv = dev->dev_private; 993 u16 tmp = 0; 994 unsigned long overhead; 995 unsigned long stolen; 996 997 /* Get the fb aperture size and "stolen" memory amount. */ 998 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp); 999 1000 *aperture_size = 1024 * 1024; 1001 *preallocated_size = 1024 * 1024; 1002 1003 switch (dev->pdev->device) { 1004 case PCI_DEVICE_ID_INTEL_82830_CGC: 1005 case PCI_DEVICE_ID_INTEL_82845G_IG: 1006 case PCI_DEVICE_ID_INTEL_82855GM_IG: 1007 case PCI_DEVICE_ID_INTEL_82865_IG: 1008 if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) 1009 *aperture_size *= 64; 1010 else 1011 *aperture_size *= 128; 1012 break; 1013 default: 1014 /* 9xx supports large sizes, just look at the length */ 1015 *aperture_size = pci_resource_len(dev->pdev, 2); 1016 break; 1017 } 1018 1019 /* 1020 * Some of the preallocated space is taken by the GTT 1021 * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 
1022 */ 1023 if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) 1024 overhead = 4096; 1025 else 1026 overhead = (*aperture_size / 1024) + 4096; 1027 1028 if (IS_GEN6(dev)) { 1029 /* SNB has memory control reg at 0x50.w */ 1030 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp); 1031 1032 switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) { 1033 case INTEL_855_GMCH_GMS_DISABLED: 1034 DRM_ERROR("video memory is disabled\n"); 1035 return -1; 1036 case SNB_GMCH_GMS_STOLEN_32M: 1037 stolen = 32 * 1024 * 1024; 1038 break; 1039 case SNB_GMCH_GMS_STOLEN_64M: 1040 stolen = 64 * 1024 * 1024; 1041 break; 1042 case SNB_GMCH_GMS_STOLEN_96M: 1043 stolen = 96 * 1024 * 1024; 1044 break; 1045 case SNB_GMCH_GMS_STOLEN_128M: 1046 stolen = 128 * 1024 * 1024; 1047 break; 1048 case SNB_GMCH_GMS_STOLEN_160M: 1049 stolen = 160 * 1024 * 1024; 1050 break; 1051 case SNB_GMCH_GMS_STOLEN_192M: 1052 stolen = 192 * 1024 * 1024; 1053 break; 1054 case SNB_GMCH_GMS_STOLEN_224M: 1055 stolen = 224 * 1024 * 1024; 1056 break; 1057 case SNB_GMCH_GMS_STOLEN_256M: 1058 stolen = 256 * 1024 * 1024; 1059 break; 1060 case SNB_GMCH_GMS_STOLEN_288M: 1061 stolen = 288 * 1024 * 1024; 1062 break; 1063 case SNB_GMCH_GMS_STOLEN_320M: 1064 stolen = 320 * 1024 * 1024; 1065 break; 1066 case SNB_GMCH_GMS_STOLEN_352M: 1067 stolen = 352 * 1024 * 1024; 1068 break; 1069 case SNB_GMCH_GMS_STOLEN_384M: 1070 stolen = 384 * 1024 * 1024; 1071 break; 1072 case SNB_GMCH_GMS_STOLEN_416M: 1073 stolen = 416 * 1024 * 1024; 1074 break; 1075 case SNB_GMCH_GMS_STOLEN_448M: 1076 stolen = 448 * 1024 * 1024; 1077 break; 1078 case SNB_GMCH_GMS_STOLEN_480M: 1079 stolen = 480 * 1024 * 1024; 1080 break; 1081 case SNB_GMCH_GMS_STOLEN_512M: 1082 stolen = 512 * 1024 * 1024; 1083 break; 1084 default: 1085 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", 1086 tmp & SNB_GMCH_GMS_STOLEN_MASK); 1087 return -1; 1088 } 1089 } else { 1090 switch (tmp & INTEL_GMCH_GMS_MASK) { 1091 case INTEL_855_GMCH_GMS_DISABLED: 1092 DRM_ERROR("video memory 
is disabled\n"); 1093 return -1; 1094 case INTEL_855_GMCH_GMS_STOLEN_1M: 1095 stolen = 1 * 1024 * 1024; 1096 break; 1097 case INTEL_855_GMCH_GMS_STOLEN_4M: 1098 stolen = 4 * 1024 * 1024; 1099 break; 1100 case INTEL_855_GMCH_GMS_STOLEN_8M: 1101 stolen = 8 * 1024 * 1024; 1102 break; 1103 case INTEL_855_GMCH_GMS_STOLEN_16M: 1104 stolen = 16 * 1024 * 1024; 1105 break; 1106 case INTEL_855_GMCH_GMS_STOLEN_32M: 1107 stolen = 32 * 1024 * 1024; 1108 break; 1109 case INTEL_915G_GMCH_GMS_STOLEN_48M: 1110 stolen = 48 * 1024 * 1024; 1111 break; 1112 case INTEL_915G_GMCH_GMS_STOLEN_64M: 1113 stolen = 64 * 1024 * 1024; 1114 break; 1115 case INTEL_GMCH_GMS_STOLEN_128M: 1116 stolen = 128 * 1024 * 1024; 1117 break; 1118 case INTEL_GMCH_GMS_STOLEN_256M: 1119 stolen = 256 * 1024 * 1024; 1120 break; 1121 case INTEL_GMCH_GMS_STOLEN_96M: 1122 stolen = 96 * 1024 * 1024; 1123 break; 1124 case INTEL_GMCH_GMS_STOLEN_160M: 1125 stolen = 160 * 1024 * 1024; 1126 break; 1127 case INTEL_GMCH_GMS_STOLEN_224M: 1128 stolen = 224 * 1024 * 1024; 1129 break; 1130 case INTEL_GMCH_GMS_STOLEN_352M: 1131 stolen = 352 * 1024 * 1024; 1132 break; 1133 default: 1134 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", 1135 tmp & INTEL_GMCH_GMS_MASK); 1136 return -1; 1137 } 1138 } 1139 1140 *preallocated_size = stolen - overhead; 1141 *start = overhead; 1142 1143 return 0; 1144} 1145 1146#define PTE_ADDRESS_MASK 0xfffff000 1147#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ 1148#define PTE_MAPPING_TYPE_UNCACHED (0 << 1) 1149#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ 1150#define PTE_MAPPING_TYPE_CACHED (3 << 1) 1151#define PTE_MAPPING_TYPE_MASK (3 << 1) 1152#define PTE_VALID (1 << 0) 1153 1154/** 1155 * i915_gtt_to_phys - take a GTT address and turn it into a physical one 1156 * @dev: drm device 1157 * @gtt_addr: address to translate 1158 * 1159 * Some chip functions require allocations from stolen space but need the 1160 * physical address of the memory in question. 
We use this routine 1161 * to get a physical address suitable for register programming from a given 1162 * GTT address. 1163 */ 1164static unsigned long i915_gtt_to_phys(struct drm_device *dev, 1165 unsigned long gtt_addr) 1166{ 1167 unsigned long *gtt; 1168 unsigned long entry, phys; 1169 int gtt_bar = IS_I9XX(dev) ? 0 : 1; 1170 int gtt_offset, gtt_size; 1171 1172 if (IS_I965G(dev)) { 1173 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { 1174 gtt_offset = 2*1024*1024; 1175 gtt_size = 2*1024*1024; 1176 } else { 1177 gtt_offset = 512*1024; 1178 gtt_size = 512*1024; 1179 } 1180 } else { 1181 gtt_bar = 3; 1182 gtt_offset = 0; 1183 gtt_size = pci_resource_len(dev->pdev, gtt_bar); 1184 } 1185 1186 gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, 1187 gtt_size); 1188 if (!gtt) { 1189 DRM_ERROR("ioremap of GTT failed\n"); 1190 return 0; 1191 } 1192 1193 entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); 1194 1195 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); 1196 1197 /* Mask out these reserved bits on this hardware. */ 1198 if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || 1199 IS_I945G(dev) || IS_I945GM(dev)) { 1200 entry &= ~PTE_ADDRESS_MASK_HIGH; 1201 } 1202 1203 /* If it's not a mapping type we know, then bail. 
*/ 1204 if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && 1205 (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { 1206 iounmap(gtt); 1207 return 0; 1208 } 1209 1210 if (!(entry & PTE_VALID)) { 1211 DRM_ERROR("bad GTT entry in stolen space\n"); 1212 iounmap(gtt); 1213 return 0; 1214 } 1215 1216 iounmap(gtt); 1217 1218 phys =(entry & PTE_ADDRESS_MASK) | 1219 ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); 1220 1221 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); 1222 1223 return phys; 1224} 1225 1226static void i915_warn_stolen(struct drm_device *dev) 1227{ 1228 DRM_ERROR("not enough stolen space for compressed buffer, disabling\n"); 1229 DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); 1230} 1231 1232static void i915_setup_compression(struct drm_device *dev, int size) 1233{ 1234 struct drm_i915_private *dev_priv = dev->dev_private; 1235 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 1236 unsigned long cfb_base; 1237 unsigned long ll_base = 0; 1238 1239 /* Leave 1M for line length buffer & misc. 
*/ 1240 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); 1241 if (!compressed_fb) { 1242 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1243 i915_warn_stolen(dev); 1244 return; 1245 } 1246 1247 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 1248 if (!compressed_fb) { 1249 i915_warn_stolen(dev); 1250 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1251 return; 1252 } 1253 1254 cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); 1255 if (!cfb_base) { 1256 DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); 1257 drm_mm_put_block(compressed_fb); 1258 } 1259 1260 if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { 1261 compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, 1262 4096, 0); 1263 if (!compressed_llb) { 1264 i915_warn_stolen(dev); 1265 return; 1266 } 1267 1268 compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); 1269 if (!compressed_llb) { 1270 i915_warn_stolen(dev); 1271 return; 1272 } 1273 1274 ll_base = i915_gtt_to_phys(dev, compressed_llb->start); 1275 if (!ll_base) { 1276 DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); 1277 drm_mm_put_block(compressed_fb); 1278 drm_mm_put_block(compressed_llb); 1279 } 1280 } 1281 1282 dev_priv->cfb_size = size; 1283 1284 intel_disable_fbc(dev); 1285 dev_priv->compressed_fb = compressed_fb; 1286 if (IS_IRONLAKE_M(dev)) 1287 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 1288 else if (IS_GM45(dev)) { 1289 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1290 } else { 1291 I915_WRITE(FBC_CFB_BASE, cfb_base); 1292 I915_WRITE(FBC_LL_BASE, ll_base); 1293 dev_priv->compressed_llb = compressed_llb; 1294 } 1295 1296 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, 1297 ll_base, size >> 20); 1298} 1299 1300static void i915_cleanup_compression(struct drm_device *dev) 1301{ 1302 struct drm_i915_private *dev_priv = dev->dev_private; 1303 1304 drm_mm_put_block(dev_priv->compressed_fb); 1305 if (!IS_GM45(dev)) 1306 
		drm_mm_put_block(dev_priv->compressed_llb);
}

/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/* vga_switcheroo callback: power the GPU up/down on a mux switch. */
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_ERR "i915: switched off\n");
		drm_kms_helper_poll_disable(dev);
		i915_suspend(dev, pmm);
	}
}

/* Only allow a GPU switch while no DRM clients hold the device open. */
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

/*
 * KMS-mode driver initialization: set up the stolen-memory allocator,
 * GEM, the ring buffer, FBC, BIOS tables, VGA arbitration/switcheroo,
 * modesetting, interrupts and fbdev.  Later failures unwind the earlier
 * steps via the cleanup_* labels.
 */
static int i915_load_modeset_init(struct drm_device *dev,
				  unsigned long prealloc_start,
				  unsigned long prealloc_size,
				  unsigned long agp_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fb_bar = IS_I9XX(dev) ? 2 : 0;
	int ret = 0;

	dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
		0xff000000;

	/* Basic memrange allocator for stolen space (aka vram) */
	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		goto out;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Try to get an 8M buffer... */
		if (prealloc_size > (9*1024*1024))
			cfb_size = 8*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	ret = intel_init_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret)
		goto cleanup_ringbuffer;

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	/*
	 * Initialize the hardware status page IRQ location.
	 */

	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);
	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}

/* Allocate per-master (X server) private data. */
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

/* Free per-master private data allocated by i915_master_create(). */
void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

/* Read FSB and memory frequencies from CLKCFG (Pineview). */
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect
pineview DDR3 setting */ 1517 tmp = I915_READ(CSHRDDR3CTL); 1518 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; 1519} 1520 1521static void i915_ironlake_get_mem_freq(struct drm_device *dev) 1522{ 1523 drm_i915_private_t *dev_priv = dev->dev_private; 1524 u16 ddrpll, csipll; 1525 1526 ddrpll = I915_READ16(DDRMPLL1); 1527 csipll = I915_READ16(CSIPLL0); 1528 1529 switch (ddrpll & 0xff) { 1530 case 0xc: 1531 dev_priv->mem_freq = 800; 1532 break; 1533 case 0x10: 1534 dev_priv->mem_freq = 1066; 1535 break; 1536 case 0x14: 1537 dev_priv->mem_freq = 1333; 1538 break; 1539 case 0x18: 1540 dev_priv->mem_freq = 1600; 1541 break; 1542 default: 1543 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", 1544 ddrpll & 0xff); 1545 dev_priv->mem_freq = 0; 1546 break; 1547 } 1548 1549 dev_priv->r_t = dev_priv->mem_freq; 1550 1551 switch (csipll & 0x3ff) { 1552 case 0x00c: 1553 dev_priv->fsb_freq = 3200; 1554 break; 1555 case 0x00e: 1556 dev_priv->fsb_freq = 3733; 1557 break; 1558 case 0x010: 1559 dev_priv->fsb_freq = 4266; 1560 break; 1561 case 0x012: 1562 dev_priv->fsb_freq = 4800; 1563 break; 1564 case 0x014: 1565 dev_priv->fsb_freq = 5333; 1566 break; 1567 case 0x016: 1568 dev_priv->fsb_freq = 5866; 1569 break; 1570 case 0x018: 1571 dev_priv->fsb_freq = 6400; 1572 break; 1573 default: 1574 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", 1575 csipll & 0x3ff); 1576 dev_priv->fsb_freq = 0; 1577 break; 1578 } 1579 1580 if (dev_priv->fsb_freq == 3200) { 1581 dev_priv->c_m = 0; 1582 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { 1583 dev_priv->c_m = 1; 1584 } else { 1585 dev_priv->c_m = 2; 1586 } 1587} 1588 1589struct v_table { 1590 u8 vid; 1591 unsigned long vd; /* in .1 mil */ 1592 unsigned long vm; /* in .1 mil */ 1593 u8 pvid; 1594}; 1595 1596static struct v_table v_table[] = { 1597 { 0, 16125, 15000, 0x7f, }, 1598 { 1, 16000, 14875, 0x7e, }, 1599 { 2, 15875, 14750, 0x7d, }, 1600 { 3, 15750, 14625, 0x7c, }, 1601 { 4, 15625, 14500, 0x7b, }, 1602 { 5, 
15500, 14375, 0x7a, }, 1603 { 6, 15375, 14250, 0x79, }, 1604 { 7, 15250, 14125, 0x78, }, 1605 { 8, 15125, 14000, 0x77, }, 1606 { 9, 15000, 13875, 0x76, }, 1607 { 10, 14875, 13750, 0x75, }, 1608 { 11, 14750, 13625, 0x74, }, 1609 { 12, 14625, 13500, 0x73, }, 1610 { 13, 14500, 13375, 0x72, }, 1611 { 14, 14375, 13250, 0x71, }, 1612 { 15, 14250, 13125, 0x70, }, 1613 { 16, 14125, 13000, 0x6f, }, 1614 { 17, 14000, 12875, 0x6e, }, 1615 { 18, 13875, 12750, 0x6d, }, 1616 { 19, 13750, 12625, 0x6c, }, 1617 { 20, 13625, 12500, 0x6b, }, 1618 { 21, 13500, 12375, 0x6a, }, 1619 { 22, 13375, 12250, 0x69, }, 1620 { 23, 13250, 12125, 0x68, }, 1621 { 24, 13125, 12000, 0x67, }, 1622 { 25, 13000, 11875, 0x66, }, 1623 { 26, 12875, 11750, 0x65, }, 1624 { 27, 12750, 11625, 0x64, }, 1625 { 28, 12625, 11500, 0x63, }, 1626 { 29, 12500, 11375, 0x62, }, 1627 { 30, 12375, 11250, 0x61, }, 1628 { 31, 12250, 11125, 0x60, }, 1629 { 32, 12125, 11000, 0x5f, }, 1630 { 33, 12000, 10875, 0x5e, }, 1631 { 34, 11875, 10750, 0x5d, }, 1632 { 35, 11750, 10625, 0x5c, }, 1633 { 36, 11625, 10500, 0x5b, }, 1634 { 37, 11500, 10375, 0x5a, }, 1635 { 38, 11375, 10250, 0x59, }, 1636 { 39, 11250, 10125, 0x58, }, 1637 { 40, 11125, 10000, 0x57, }, 1638 { 41, 11000, 9875, 0x56, }, 1639 { 42, 10875, 9750, 0x55, }, 1640 { 43, 10750, 9625, 0x54, }, 1641 { 44, 10625, 9500, 0x53, }, 1642 { 45, 10500, 9375, 0x52, }, 1643 { 46, 10375, 9250, 0x51, }, 1644 { 47, 10250, 9125, 0x50, }, 1645 { 48, 10125, 9000, 0x4f, }, 1646 { 49, 10000, 8875, 0x4e, }, 1647 { 50, 9875, 8750, 0x4d, }, 1648 { 51, 9750, 8625, 0x4c, }, 1649 { 52, 9625, 8500, 0x4b, }, 1650 { 53, 9500, 8375, 0x4a, }, 1651 { 54, 9375, 8250, 0x49, }, 1652 { 55, 9250, 8125, 0x48, }, 1653 { 56, 9125, 8000, 0x47, }, 1654 { 57, 9000, 7875, 0x46, }, 1655 { 58, 8875, 7750, 0x45, }, 1656 { 59, 8750, 7625, 0x44, }, 1657 { 60, 8625, 7500, 0x43, }, 1658 { 61, 8500, 7375, 0x42, }, 1659 { 62, 8375, 7250, 0x41, }, 1660 { 63, 8250, 7125, 0x40, }, 1661 { 64, 8125, 7000, 0x3f, }, 1662 { 65, 
8000, 6875, 0x3e, }, 1663 { 66, 7875, 6750, 0x3d, }, 1664 { 67, 7750, 6625, 0x3c, }, 1665 { 68, 7625, 6500, 0x3b, }, 1666 { 69, 7500, 6375, 0x3a, }, 1667 { 70, 7375, 6250, 0x39, }, 1668 { 71, 7250, 6125, 0x38, }, 1669 { 72, 7125, 6000, 0x37, }, 1670 { 73, 7000, 5875, 0x36, }, 1671 { 74, 6875, 5750, 0x35, }, 1672 { 75, 6750, 5625, 0x34, }, 1673 { 76, 6625, 5500, 0x33, }, 1674 { 77, 6500, 5375, 0x32, }, 1675 { 78, 6375, 5250, 0x31, }, 1676 { 79, 6250, 5125, 0x30, }, 1677 { 80, 6125, 5000, 0x2f, }, 1678 { 81, 6000, 4875, 0x2e, }, 1679 { 82, 5875, 4750, 0x2d, }, 1680 { 83, 5750, 4625, 0x2c, }, 1681 { 84, 5625, 4500, 0x2b, }, 1682 { 85, 5500, 4375, 0x2a, }, 1683 { 86, 5375, 4250, 0x29, }, 1684 { 87, 5250, 4125, 0x28, }, 1685 { 88, 5125, 4000, 0x27, }, 1686 { 89, 5000, 3875, 0x26, }, 1687 { 90, 4875, 3750, 0x25, }, 1688 { 91, 4750, 3625, 0x24, }, 1689 { 92, 4625, 3500, 0x23, }, 1690 { 93, 4500, 3375, 0x22, }, 1691 { 94, 4375, 3250, 0x21, }, 1692 { 95, 4250, 3125, 0x20, }, 1693 { 96, 4125, 3000, 0x1f, }, 1694 { 97, 4125, 3000, 0x1e, }, 1695 { 98, 4125, 3000, 0x1d, }, 1696 { 99, 4125, 3000, 0x1c, }, 1697 { 100, 4125, 3000, 0x1b, }, 1698 { 101, 4125, 3000, 0x1a, }, 1699 { 102, 4125, 3000, 0x19, }, 1700 { 103, 4125, 3000, 0x18, }, 1701 { 104, 4125, 3000, 0x17, }, 1702 { 105, 4125, 3000, 0x16, }, 1703 { 106, 4125, 3000, 0x15, }, 1704 { 107, 4125, 3000, 0x14, }, 1705 { 108, 4125, 3000, 0x13, }, 1706 { 109, 4125, 3000, 0x12, }, 1707 { 110, 4125, 3000, 0x11, }, 1708 { 111, 4125, 3000, 0x10, }, 1709 { 112, 4125, 3000, 0x0f, }, 1710 { 113, 4125, 3000, 0x0e, }, 1711 { 114, 4125, 3000, 0x0d, }, 1712 { 115, 4125, 3000, 0x0c, }, 1713 { 116, 4125, 3000, 0x0b, }, 1714 { 117, 4125, 3000, 0x0a, }, 1715 { 118, 4125, 3000, 0x09, }, 1716 { 119, 4125, 3000, 0x08, }, 1717 { 120, 1125, 0, 0x07, }, 1718 { 121, 1000, 0, 0x06, }, 1719 { 122, 875, 0, 0x05, }, 1720 { 123, 750, 0, 0x04, }, 1721 { 124, 625, 0, 0x03, }, 1722 { 125, 500, 0, 0x02, }, 1723 { 126, 375, 0, 0x01, }, 1724 { 127, 0, 0, 0x00, 
}, 1725}; 1726 1727struct cparams { 1728 int i; 1729 int t; 1730 int m; 1731 int c; 1732}; 1733 1734static struct cparams cparams[] = { 1735 { 1, 1333, 301, 28664 }, 1736 { 1, 1066, 294, 24460 }, 1737 { 1, 800, 294, 25192 }, 1738 { 0, 1333, 276, 27605 }, 1739 { 0, 1066, 276, 27605 }, 1740 { 0, 800, 231, 23784 }, 1741}; 1742 1743unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 1744{ 1745 u64 total_count, diff, ret; 1746 u32 count1, count2, count3, m = 0, c = 0; 1747 unsigned long now = jiffies_to_msecs(jiffies), diff1; 1748 int i; 1749 1750 diff1 = now - dev_priv->last_time1; 1751 1752 count1 = I915_READ(DMIEC); 1753 count2 = I915_READ(DDREC); 1754 count3 = I915_READ(CSIEC); 1755 1756 total_count = count1 + count2 + count3; 1757 1758 /* FIXME: handle per-counter overflow */ 1759 if (total_count < dev_priv->last_count1) { 1760 diff = ~0UL - dev_priv->last_count1; 1761 diff += total_count; 1762 } else { 1763 diff = total_count - dev_priv->last_count1; 1764 } 1765 1766 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 1767 if (cparams[i].i == dev_priv->c_m && 1768 cparams[i].t == dev_priv->r_t) { 1769 m = cparams[i].m; 1770 c = cparams[i].c; 1771 break; 1772 } 1773 } 1774 1775 div_u64(diff, diff1); 1776 ret = ((m * diff) + c); 1777 div_u64(ret, 10); 1778 1779 dev_priv->last_count1 = total_count; 1780 dev_priv->last_time1 = now; 1781 1782 return ret; 1783} 1784 1785unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 1786{ 1787 unsigned long m, x, b; 1788 u32 tsfs; 1789 1790 tsfs = I915_READ(TSFS); 1791 1792 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 1793 x = I915_READ8(TR1); 1794 1795 b = tsfs & TSFS_INTR_MASK; 1796 1797 return ((m * x) / 127) - b; 1798} 1799 1800static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 1801{ 1802 unsigned long val = 0; 1803 int i; 1804 1805 for (i = 0; i < ARRAY_SIZE(v_table); i++) { 1806 if (v_table[i].pvid == pxvid) { 1807 if (IS_MOBILE(dev_priv->dev)) 1808 val = v_table[i].vm; 
1809 else 1810 val = v_table[i].vd; 1811 } 1812 } 1813 1814 return val; 1815} 1816 1817void i915_update_gfx_val(struct drm_i915_private *dev_priv) 1818{ 1819 struct timespec now, diff1; 1820 u64 diff; 1821 unsigned long diffms; 1822 u32 count; 1823 1824 getrawmonotonic(&now); 1825 diff1 = timespec_sub(now, dev_priv->last_time2); 1826 1827 /* Don't divide by 0 */ 1828 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 1829 if (!diffms) 1830 return; 1831 1832 count = I915_READ(GFXEC); 1833 1834 if (count < dev_priv->last_count2) { 1835 diff = ~0UL - dev_priv->last_count2; 1836 diff += count; 1837 } else { 1838 diff = count - dev_priv->last_count2; 1839 } 1840 1841 dev_priv->last_count2 = count; 1842 dev_priv->last_time2 = now; 1843 1844 /* More magic constants... */ 1845 diff = diff * 1181; 1846 div_u64(diff, diffms * 10); 1847 dev_priv->gfx_power = diff; 1848} 1849 1850unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 1851{ 1852 unsigned long t, corr, state1, corr2, state2; 1853 u32 pxvid, ext_v; 1854 1855 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); 1856 pxvid = (pxvid >> 24) & 0x7f; 1857 ext_v = pvid_to_extvid(dev_priv, pxvid); 1858 1859 state1 = ext_v; 1860 1861 t = i915_mch_val(dev_priv); 1862 1863 /* Revel in the empirically derived constants */ 1864 1865 /* Correction factor in 1/100000 units */ 1866 if (t > 80) 1867 corr = ((t * 2349) + 135940); 1868 else if (t >= 50) 1869 corr = ((t * 964) + 29317); 1870 else /* < 50 */ 1871 corr = ((t * 301) + 1004); 1872 1873 corr = corr * ((150142 * state1) / 10000 - 78642); 1874 corr /= 100000; 1875 corr2 = (corr * dev_priv->corr); 1876 1877 state2 = (corr2 * state1) / 10000; 1878 state2 /= 100; /* convert to mW */ 1879 1880 i915_update_gfx_val(dev_priv); 1881 1882 return dev_priv->gfx_power + state2; 1883} 1884 1885/* Global for IPS driver to get at the current i915 device */ 1886static struct drm_i915_private *i915_mch_dev; 1887/* 1888 * Lock protecting IPS related data structures 1889 
* - i915_mch_dev 1890 * - dev_priv->max_delay 1891 * - dev_priv->min_delay 1892 * - dev_priv->fmax 1893 * - dev_priv->gpu_busy 1894 */ 1895DEFINE_SPINLOCK(mchdev_lock); 1896 1897/** 1898 * i915_read_mch_val - return value for IPS use 1899 * 1900 * Calculate and return a value for the IPS driver to use when deciding whether 1901 * we have thermal and power headroom to increase CPU or GPU power budget. 1902 */ 1903unsigned long i915_read_mch_val(void) 1904{ 1905 struct drm_i915_private *dev_priv; 1906 unsigned long chipset_val, graphics_val, ret = 0; 1907 1908 spin_lock(&mchdev_lock); 1909 if (!i915_mch_dev) 1910 goto out_unlock; 1911 dev_priv = i915_mch_dev; 1912 1913 chipset_val = i915_chipset_val(dev_priv); 1914 graphics_val = i915_gfx_val(dev_priv); 1915 1916 ret = chipset_val + graphics_val; 1917 1918out_unlock: 1919 spin_unlock(&mchdev_lock); 1920 1921 return ret; 1922} 1923EXPORT_SYMBOL_GPL(i915_read_mch_val); 1924 1925/** 1926 * i915_gpu_raise - raise GPU frequency limit 1927 * 1928 * Raise the limit; IPS indicates we have thermal headroom. 1929 */ 1930bool i915_gpu_raise(void) 1931{ 1932 struct drm_i915_private *dev_priv; 1933 bool ret = true; 1934 1935 spin_lock(&mchdev_lock); 1936 if (!i915_mch_dev) { 1937 ret = false; 1938 goto out_unlock; 1939 } 1940 dev_priv = i915_mch_dev; 1941 1942 if (dev_priv->max_delay > dev_priv->fmax) 1943 dev_priv->max_delay--; 1944 1945out_unlock: 1946 spin_unlock(&mchdev_lock); 1947 1948 return ret; 1949} 1950EXPORT_SYMBOL_GPL(i915_gpu_raise); 1951 1952/** 1953 * i915_gpu_lower - lower GPU frequency limit 1954 * 1955 * IPS indicates we're close to a thermal limit, so throttle back the GPU 1956 * frequency maximum. 
1957 */ 1958bool i915_gpu_lower(void) 1959{ 1960 struct drm_i915_private *dev_priv; 1961 bool ret = true; 1962 1963 spin_lock(&mchdev_lock); 1964 if (!i915_mch_dev) { 1965 ret = false; 1966 goto out_unlock; 1967 } 1968 dev_priv = i915_mch_dev; 1969 1970 if (dev_priv->max_delay < dev_priv->min_delay) 1971 dev_priv->max_delay++; 1972 1973out_unlock: 1974 spin_unlock(&mchdev_lock); 1975 1976 return ret; 1977} 1978EXPORT_SYMBOL_GPL(i915_gpu_lower); 1979 1980/** 1981 * i915_gpu_busy - indicate GPU business to IPS 1982 * 1983 * Tell the IPS driver whether or not the GPU is busy. 1984 */ 1985bool i915_gpu_busy(void) 1986{ 1987 struct drm_i915_private *dev_priv; 1988 bool ret = false; 1989 1990 spin_lock(&mchdev_lock); 1991 if (!i915_mch_dev) 1992 goto out_unlock; 1993 dev_priv = i915_mch_dev; 1994 1995 ret = dev_priv->busy; 1996 1997out_unlock: 1998 spin_unlock(&mchdev_lock); 1999 2000 return ret; 2001} 2002EXPORT_SYMBOL_GPL(i915_gpu_busy); 2003 2004/** 2005 * i915_gpu_turbo_disable - disable graphics turbo 2006 * 2007 * Disable graphics turbo by resetting the max frequency and setting the 2008 * current frequency to the default. 
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	resource_size_t base, size;
	int ret = 0, mmio_bar;
	uint32_t agp_size, prealloc_size, prealloc_start;
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	/* flags carries the intel_device_info pointer from the PCI id table */
	dev_priv->info = (struct intel_device_info *) flags;

	/* Add register map (needed for suspend/resume) */
	mmio_bar = IS_I9XX(dev) ? 0 : 1;
	base = pci_resource_start(dev->pdev, mmio_bar);
	size = pci_resource_len(dev->pdev, mmio_bar);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	dev_priv->regs = ioremap(base, size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base,
				     dev->agp->agp_info.aper_size * 1024*1024);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 dev->agp->agp_info.aper_size *
					 1024 * 1024,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		/* non-fatal: we just run without write combining */
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
	if (ret)
		goto out_iomapfree;

	dev_priv->wq = create_singlethread_workqueue("i915");
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_iomapfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	if (prealloc_size > agp_size * 3 / 4) {
		DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
			  "memory stolen.\n",
			  prealloc_size / 1024, agp_size / 1024);
		DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
			  "updating the BIOS to fix).\n");
		dev_priv->has_gem = 0;
	}

	if (dev_priv->has_gem == 0 &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
		ret = -ENODEV;
		goto out_iomapfree;
	}

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0)
			goto out_workqueue_free;
	}

	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_IRONLAKE(dev))
		i915_ironlake_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->user_irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	dev_priv->trace_irq_seqno = 0;

	ret = drm_vblank_init(dev, I915_NUM_PIPE);

	if (ret) {
		/* unload tears down everything set up so far */
		(void) i915_driver_unload(dev);
		return ret;
	}

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev, prealloc_start,
					     prealloc_size, agp_size);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_workqueue_free;
		}
	}

	/* Must be done after probing outputs */
	intel_opregion_init(dev, 0);

	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long) dev);

	/* publish this device to the IPS driver (see i915_read_mch_val) */
	spin_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

	return 0;

out_workqueue_free:
	destroy_workqueue(dev_priv->wq);
out_iomapfree:
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	iounmap(dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}

/* Tear down everything set up by i915_driver_load(), in reverse order. */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_destroy_error_state(dev);

	/* unpublish from the IPS driver before freeing anything */
	spin_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock(&mchdev_lock);

	destroy_workqueue(dev_priv->wq);
	del_timer_sync(&dev_priv->hangcheck_timer);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev); 2246 2247 /* 2248 * free the memory space allocated for the child device 2249 * config parsed from VBT 2250 */ 2251 if (dev_priv->child_dev && dev_priv->child_dev_num) { 2252 kfree(dev_priv->child_dev); 2253 dev_priv->child_dev = NULL; 2254 dev_priv->child_dev_num = 0; 2255 } 2256 drm_irq_uninstall(dev); 2257 vga_switcheroo_unregister_client(dev->pdev); 2258 vga_client_register(dev->pdev, NULL, NULL, NULL); 2259 } 2260 2261 if (dev->pdev->msi_enabled) 2262 pci_disable_msi(dev->pdev); 2263 2264 if (dev_priv->regs != NULL) 2265 iounmap(dev_priv->regs); 2266 2267 intel_opregion_free(dev, 0); 2268 2269 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2270 i915_gem_free_all_phys_object(dev); 2271 2272 mutex_lock(&dev->struct_mutex); 2273 i915_gem_cleanup_ringbuffer(dev); 2274 mutex_unlock(&dev->struct_mutex); 2275 if (I915_HAS_FBC(dev) && i915_powersave) 2276 i915_cleanup_compression(dev); 2277 drm_mm_takedown(&dev_priv->vram); 2278 i915_gem_lastclose(dev); 2279 2280 intel_cleanup_overlay(dev); 2281 } 2282 2283 intel_teardown_mchbar(dev); 2284 2285 pci_dev_put(dev_priv->bridge_dev); 2286 kfree(dev->dev_private); 2287 2288 return 0; 2289} 2290 2291int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) 2292{ 2293 struct drm_i915_file_private *i915_file_priv; 2294 2295 DRM_DEBUG_DRIVER("\n"); 2296 i915_file_priv = (struct drm_i915_file_private *) 2297 kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); 2298 2299 if (!i915_file_priv) 2300 return -ENOMEM; 2301 2302 file_priv->driver_priv = i915_file_priv; 2303 2304 INIT_LIST_HEAD(&i915_file_priv->mm.request_list); 2305 2306 return 0; 2307} 2308 2309/** 2310 * i915_driver_lastclose - clean up after all DRM clients have exited 2311 * @dev: DRM device 2312 * 2313 * Take care of cleaning up after all DRM clients have exited. In the 2314 * mode setting case, we want to restore the kernel's initial mode (just 2315 * in case the last client left us in a bad state). 
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* KMS: just restore the fb console mode and let vga_switcheroo
	 * perform any switch that was deferred while clients were open. */
	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_helper_restore();
		vga_switcheroo_process_delayed_switch();
		return;
	}

	/* UMS: drop GEM state, the legacy AGP heap, and DMA structures. */
	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

/**
 * i915_driver_preclose - per-client cleanup before the file is destroyed
 * @dev: DRM device
 * @file_priv: DRM file being closed
 *
 * Releases the client's GEM requests and, in the non-KMS case, any
 * blocks it still holds on the legacy AGP heap.
 */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

/**
 * i915_driver_postclose - free the per-client private allocated in open
 * @dev: DRM device
 * @file_priv: DRM file being destroyed
 */
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	kfree(i915_file_priv);
}

/* Driver ioctl table; indexed by the DRM_I915_* ioctl numbers.
 * Legacy (UMS/DRI1) entries first, GEM/KMS entries follow on. */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	/* GEM ioctls below take their own locking (DRM_UNLOCKED), so they
	 * are dispatched without the global DRM lock held. */
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING,
		      i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

/* Number of entries in i915_ioctls[], exported to the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate that every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}