/* i915_dma.c — revision 8d608aa6295242fe4c4b6105b8c59c6a5b232d89 */
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- 2 */ 3/* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29#include "drmP.h" 30#include "drm.h" 31#include "drm_crtc_helper.h" 32#include "drm_fb_helper.h" 33#include "intel_drv.h" 34#include "i915_drm.h" 35#include "i915_drv.h" 36#include "i915_trace.h" 37#include "../../../platform/x86/intel_ips.h" 38#include <linux/pci.h> 39#include <linux/vgaarb.h> 40#include <linux/acpi.h> 41#include <linux/pnp.h> 42#include <linux/vga_switcheroo.h> 43#include <linux/slab.h> 44#include <acpi/video.h> 45 46/** 47 * Sets up the hardware status page for devices that need a physical address 48 * in the register. 
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	/* On gen4+ fold address bits 35:32 into bits 7:4 of the value
	 * written to HWS_PGA — presumably the register's format for
	 * >4GiB-capable parts; NOTE(review): confirm against the PRM. */
	if (INTEL_INFO(dev)->gen >= 4)
		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
					     0xf0;

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

/* Resynchronize the software copy of the ring state (head/tail/free space)
 * from the hardware registers, and mark the ring-empty perf box in the
 * sarea if the ring turned out to be empty. */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

/* Tear down legacy DMA state: disable IRQs, clean up all rings, and free
 * the hardware status page if this chipset uses a GFX-address HWS. */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

/* Legacy (UMS) DMA initialization: locate the sarea, optionally map the
 * userspace-provided ring buffer, and record the front/back buffer layout. */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* A GEM-managed ring already exists; refuse the legacy path. */
		if (ring->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ring->size = init->ring_size;

		ring->map.offset = init->ring_start;
		ring->map.size = init->ring_size;
		ring->map.type = 0;
		ring->map.flags = 0;
		ring->map.mtrr = 0;

		drm_core_ioremap_wc(&ring->map, dev);

		if (ring->map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	ring->virtual_start = ring->map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}

/* Re-validate DMA state after resume: the ring mapping and status page must
 * still be present; re-program the status page address into the hardware. */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	/* gfx_addr != 0 means a GFX-address HWS set up via the hws_addr
	 * ioctl; otherwise fall back to the physical DMA status page. */
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

/* DRM_IOCTL_I915_INIT dispatcher: routes to initialize/cleanup/resume
 * based on init->func. */
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/*
 Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	/* Dispatch on the 3-bit command type in bits 31:29. */
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		/* 3D commands with opcode <= 0x18 are fixed single-dword. */
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			/* Length field width depends on the sub-opcode. */
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

/* Validate a userspace command buffer with validate_cmd() and, if every
 * instruction is permitted, copy it into the low-priority ring (padded to
 * an even dword count). Returns -EINVAL on illegal/oversized buffers. */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	/* Must fit in the ring with room to spare. */
	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	/* First pass: size-check every instruction; sz == 0 flags an
	 * illegal instruction, and i + sz > dwords a truncated one. */
	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	/* Second pass: emit, rounding the allocation up to an even count. */
	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);	/* pad with a NOOP dword */

	ADVANCE_LP_RING();

	return 0;
}

/* Emit a GFX_OP_DRAWRECT_INFO cliprect into the ring after validating the
 * box. Gen4+ uses the 4-dword I965 layout; older parts the 6-dword one. */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.
 * For now, do it in both places:
 */

/* Bump the software frame counter, mirror it into the sarea, and emit an
 * MI_STORE_DWORD_INDEX that writes it to the breadcrumb slot of the
 * hardware status page. */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->counter++;
	/* Keep the counter in positive 31-bit range. */
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

/* Emit a validated command buffer once per cliprect (or once if there are
 * no cliprects), with a drawing rectangle emitted before each pass. */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

/* Dispatch a non-secure batchbuffer, once per cliprect. Uses
 * MI_BATCH_BUFFER_START where available; i830/845G need the older
 * MI_BATCH_BUFFER start/end form. */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}


	/* G4x/Ironlake: flush the ISP after the batch — presumably a
	 * workaround; NOTE(review): confirm the erratum this addresses. */
	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

/* Emit a legacy front/back page flip through the ring and toggle
 * dev_priv->current_page; requires a sarea to record the flip state. */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			  __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	/* Point the display at the other buffer and toggle our record. */
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Emit the breadcrumb by hand (instead of i915_emit_breadcrumb)
	 * because last_enqueue was already updated above. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

/* Wait until the low-priority ring is (almost) completely drained. */
static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return intel_wait_ring_buffer(ring, ring->size - 8);
}

/* DRM_IOCTL_I915_FLUSH: quiesce the ring under struct_mutex. */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* DRM_IOCTL_I915_BATCHBUFFER: copy cliprects from userspace and dispatch
 * a client batchbuffer (legacy UMS path; gated by allow_batchbuffer). */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

/* DRM_IOCTL_I915_CMDBUFFER: copy a command buffer plus cliprects from
 * userspace, validate, and dispatch it. */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

/* DRM_IOCTL_I915_FLIP: perform a legacy page flip under struct_mutex. */
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* DRM_IOCTL_I915_GETPARAM: report driver/hardware capabilities to
 * userspace; the value is copied out as a single int. */
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ?
 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	case I915_PARAM_HAS_BLT:
		value = HAS_BLT(dev);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

/* DRM_IOCTL_I915_SETPARAM: accept tuning knobs from userspace. */
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}

/* DRM_IOCTL_I915_HWS_ADDR: map a GFX-address hardware status page supplied
 * by userspace (chipsets that need I915_NEED_GFX_HWS only; no-op under
 * KMS since the kernel manages the status page itself). */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->hws_map.handle;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

/* Cache a reference to the host bridge (device 0:0.0) for MCHBAR and
 * stolen-memory config-space accesses. */
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg =
	    INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	/* Gen4+ MCHBAR is a 64-bit BAR; read the high dword too. */
	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	/* i915G/GM gate MCHBAR via DEVEN; others via bit 0 of the BAR. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	/* Remember to undo this in intel_teardown_mchbar(). */
	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

/* Undo intel_setup_mchbar(): clear the enable bit we set and release any
 * resource we allocated. */
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)

/**
 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
 * a physical one
 * @dev: drm device
 * @offset: address to translate
 *
 * Some chip functions require allocations from stolen space and need the
 * physical address of the memory in question.
 */
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev_priv->bridge_dev;
	u32 base;

#if 0
	/* On the machines I have tested the Graphics Base of Stolen Memory
	 * is unreliable, so compute the base by subtracting the stolen memory
	 * from the Top of Low Usable DRAM which is where the BIOS places
	 * the graphics stolen memory.
	 */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		/* top 32bits are reserved = 0 */
		pci_read_config_dword(pdev, 0xA4, &base);
	} else {
		/* XXX presume 8xx is the same as i915 */
		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
	}
#else
	/* Derive the stolen base from TOLUD (0xb0 / 0x9c on older parts)
	 * minus the stolen size, rather than trusting the BSM register. */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		u16 val;
		pci_read_config_word(pdev, 0xb0, &val);
		base = val >> 4 << 20;
	} else {
		u8 val;
		pci_read_config_byte(pdev, 0x9c, &val);
		base = val >> 3 << 27;
	}
	base -= dev_priv->mm.gtt->stolen_size;
#endif

	return base + offset;
}

/* Warn once that FBC had to be disabled for lack of stolen space. */
static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}

/* Carve the FBC compressed framebuffer (and, pre-GM45/PCH, the line-length
 * buffer) out of stolen memory and program the FBC base registers.
 * Falls back to disabling FBC if stolen space is exhausted. */
static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
	if (compressed_fb)
		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb)
		goto err;

	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
	if (!cfb_base)
		goto err_fb;

	/* Only pre-GM45, non-PCH hardware needs a separate line-length
	 * buffer allocation. */
	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
						    4096, 4096, 0);
		if (compressed_llb)
			compressed_llb = drm_mm_get_block(compressed_llb,
							  4096, 4096);
		if (!compressed_llb)
			goto err_fb;

		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
		if (!ll_base)
			goto err_llb;
	}

	dev_priv->cfb_size = size;

	intel_disable_fbc(dev);
	dev_priv->compressed_fb = compressed_fb;
	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
		      cfb_base, ll_base, size >> 20);
	return;

err_llb:
	drm_mm_put_block(compressed_llb);
err_fb:
	drm_mm_put_block(compressed_fb);
err:
	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
	i915_warn_stolen(dev);
}

/* Release the stolen-memory nodes claimed by i915_setup_compression(). */
static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (dev_priv->compressed_llb)
		drm_mm_put_block(dev_priv->compressed_llb);
}

/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/* vga_switcheroo callback: power the GPU up (and resume) or suspend it
 * when the mux switches between GPUs. */
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
	} else {
		printk(KERN_ERR "i915: switched off\n");
		i915_suspend(dev, pmm);
	}
}

/* vga_switcheroo callback: we can only switch away while no client holds
 * the device open. */
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;
1167 1168 spin_lock(&dev->count_lock); 1169 can_switch = (dev->open_count == 0); 1170 spin_unlock(&dev->count_lock); 1171 return can_switch; 1172} 1173 1174static int i915_load_modeset_init(struct drm_device *dev) 1175{ 1176 struct drm_i915_private *dev_priv = dev->dev_private; 1177 unsigned long prealloc_size, gtt_size, mappable_size; 1178 int ret = 0; 1179 1180 prealloc_size = dev_priv->mm.gtt->stolen_size; 1181 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; 1182 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1183 1184 /* Basic memrange allocator for stolen space */ 1185 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); 1186 1187 /* Let GEM Manage all of the aperture. 1188 * 1189 * However, leave one page at the end still bound to the scratch page. 1190 * There are a number of places where the hardware apparently 1191 * prefetches past the end of the object, and we've seen multiple 1192 * hangs with the GPU head pointer stuck in a batchbuffer bound 1193 * at the last page of the aperture. One page should be enough to 1194 * keep any prefetching inside of the aperture. 1195 */ 1196 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); 1197 1198 mutex_lock(&dev->struct_mutex); 1199 ret = i915_gem_init_ringbuffer(dev); 1200 mutex_unlock(&dev->struct_mutex); 1201 if (ret) 1202 goto out; 1203 1204 /* Try to set up FBC with a reasonable compressed buffer size */ 1205 if (I915_HAS_FBC(dev) && i915_powersave) { 1206 int cfb_size; 1207 1208 /* Leave 1M for line length buffer & misc. */ 1209 1210 /* Try to get a 32M buffer... */ 1211 if (prealloc_size > (36*1024*1024)) 1212 cfb_size = 32*1024*1024; 1213 else /* fall back to 7/8 of the stolen space */ 1214 cfb_size = prealloc_size * 7 / 8; 1215 i915_setup_compression(dev, cfb_size); 1216 } 1217 1218 /* Allow hardware batchbuffers unless told otherwise. 
*/ 1219 dev_priv->allow_batchbuffer = 1; 1220 1221 ret = intel_parse_bios(dev); 1222 if (ret) 1223 DRM_INFO("failed to find VBIOS tables\n"); 1224 1225 /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 1226 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); 1227 if (ret) 1228 goto cleanup_ringbuffer; 1229 1230 intel_register_dsm_handler(); 1231 1232 ret = vga_switcheroo_register_client(dev->pdev, 1233 i915_switcheroo_set_state, 1234 NULL, 1235 i915_switcheroo_can_switch); 1236 if (ret) 1237 goto cleanup_vga_client; 1238 1239 /* IIR "flip pending" bit means done if this bit is set */ 1240 if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) 1241 dev_priv->flip_pending_is_done = true; 1242 1243 intel_modeset_init(dev); 1244 1245 ret = drm_irq_install(dev); 1246 if (ret) 1247 goto cleanup_vga_switcheroo; 1248 1249 /* Always safe in the mode setting case. */ 1250 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1251 dev->vblank_disable_allowed = 1; 1252 1253 ret = intel_fbdev_init(dev); 1254 if (ret) 1255 goto cleanup_irq; 1256 1257 drm_kms_helper_poll_init(dev); 1258 1259 /* We're off and running w/KMS */ 1260 dev_priv->mm.suspended = 0; 1261 1262 return 0; 1263 1264cleanup_irq: 1265 drm_irq_uninstall(dev); 1266cleanup_vga_switcheroo: 1267 vga_switcheroo_unregister_client(dev->pdev); 1268cleanup_vga_client: 1269 vga_client_register(dev->pdev, NULL, NULL, NULL); 1270cleanup_ringbuffer: 1271 mutex_lock(&dev->struct_mutex); 1272 i915_gem_cleanup_ringbuffer(dev); 1273 mutex_unlock(&dev->struct_mutex); 1274out: 1275 return ret; 1276} 1277 1278int i915_master_create(struct drm_device *dev, struct drm_master *master) 1279{ 1280 struct drm_i915_master_private *master_priv; 1281 1282 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); 1283 if (!master_priv) 1284 return -ENOMEM; 1285 1286 master->driver_priv = master_priv; 1287 return 0; 1288} 1289 1290void i915_master_destroy(struct drm_device *dev, struct drm_master 
*master) 1291{ 1292 struct drm_i915_master_private *master_priv = master->driver_priv; 1293 1294 if (!master_priv) 1295 return; 1296 1297 kfree(master_priv); 1298 1299 master->driver_priv = NULL; 1300} 1301 1302static void i915_pineview_get_mem_freq(struct drm_device *dev) 1303{ 1304 drm_i915_private_t *dev_priv = dev->dev_private; 1305 u32 tmp; 1306 1307 tmp = I915_READ(CLKCFG); 1308 1309 switch (tmp & CLKCFG_FSB_MASK) { 1310 case CLKCFG_FSB_533: 1311 dev_priv->fsb_freq = 533; /* 133*4 */ 1312 break; 1313 case CLKCFG_FSB_800: 1314 dev_priv->fsb_freq = 800; /* 200*4 */ 1315 break; 1316 case CLKCFG_FSB_667: 1317 dev_priv->fsb_freq = 667; /* 167*4 */ 1318 break; 1319 case CLKCFG_FSB_400: 1320 dev_priv->fsb_freq = 400; /* 100*4 */ 1321 break; 1322 } 1323 1324 switch (tmp & CLKCFG_MEM_MASK) { 1325 case CLKCFG_MEM_533: 1326 dev_priv->mem_freq = 533; 1327 break; 1328 case CLKCFG_MEM_667: 1329 dev_priv->mem_freq = 667; 1330 break; 1331 case CLKCFG_MEM_800: 1332 dev_priv->mem_freq = 800; 1333 break; 1334 } 1335 1336 /* detect pineview DDR3 setting */ 1337 tmp = I915_READ(CSHRDDR3CTL); 1338 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 
1 : 0; 1339} 1340 1341static void i915_ironlake_get_mem_freq(struct drm_device *dev) 1342{ 1343 drm_i915_private_t *dev_priv = dev->dev_private; 1344 u16 ddrpll, csipll; 1345 1346 ddrpll = I915_READ16(DDRMPLL1); 1347 csipll = I915_READ16(CSIPLL0); 1348 1349 switch (ddrpll & 0xff) { 1350 case 0xc: 1351 dev_priv->mem_freq = 800; 1352 break; 1353 case 0x10: 1354 dev_priv->mem_freq = 1066; 1355 break; 1356 case 0x14: 1357 dev_priv->mem_freq = 1333; 1358 break; 1359 case 0x18: 1360 dev_priv->mem_freq = 1600; 1361 break; 1362 default: 1363 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", 1364 ddrpll & 0xff); 1365 dev_priv->mem_freq = 0; 1366 break; 1367 } 1368 1369 dev_priv->r_t = dev_priv->mem_freq; 1370 1371 switch (csipll & 0x3ff) { 1372 case 0x00c: 1373 dev_priv->fsb_freq = 3200; 1374 break; 1375 case 0x00e: 1376 dev_priv->fsb_freq = 3733; 1377 break; 1378 case 0x010: 1379 dev_priv->fsb_freq = 4266; 1380 break; 1381 case 0x012: 1382 dev_priv->fsb_freq = 4800; 1383 break; 1384 case 0x014: 1385 dev_priv->fsb_freq = 5333; 1386 break; 1387 case 0x016: 1388 dev_priv->fsb_freq = 5866; 1389 break; 1390 case 0x018: 1391 dev_priv->fsb_freq = 6400; 1392 break; 1393 default: 1394 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", 1395 csipll & 0x3ff); 1396 dev_priv->fsb_freq = 0; 1397 break; 1398 } 1399 1400 if (dev_priv->fsb_freq == 3200) { 1401 dev_priv->c_m = 0; 1402 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { 1403 dev_priv->c_m = 1; 1404 } else { 1405 dev_priv->c_m = 2; 1406 } 1407} 1408 1409static const struct cparams { 1410 u16 i; 1411 u16 t; 1412 u16 m; 1413 u16 c; 1414} cparams[] = { 1415 { 1, 1333, 301, 28664 }, 1416 { 1, 1066, 294, 24460 }, 1417 { 1, 800, 294, 25192 }, 1418 { 0, 1333, 276, 27605 }, 1419 { 0, 1066, 276, 27605 }, 1420 { 0, 800, 231, 23784 }, 1421}; 1422 1423unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 1424{ 1425 u64 total_count, diff, ret; 1426 u32 count1, count2, count3, m = 0, c = 0; 1427 unsigned 
long now = jiffies_to_msecs(jiffies), diff1; 1428 int i; 1429 1430 diff1 = now - dev_priv->last_time1; 1431 1432 count1 = I915_READ(DMIEC); 1433 count2 = I915_READ(DDREC); 1434 count3 = I915_READ(CSIEC); 1435 1436 total_count = count1 + count2 + count3; 1437 1438 /* FIXME: handle per-counter overflow */ 1439 if (total_count < dev_priv->last_count1) { 1440 diff = ~0UL - dev_priv->last_count1; 1441 diff += total_count; 1442 } else { 1443 diff = total_count - dev_priv->last_count1; 1444 } 1445 1446 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 1447 if (cparams[i].i == dev_priv->c_m && 1448 cparams[i].t == dev_priv->r_t) { 1449 m = cparams[i].m; 1450 c = cparams[i].c; 1451 break; 1452 } 1453 } 1454 1455 diff = div_u64(diff, diff1); 1456 ret = ((m * diff) + c); 1457 ret = div_u64(ret, 10); 1458 1459 dev_priv->last_count1 = total_count; 1460 dev_priv->last_time1 = now; 1461 1462 return ret; 1463} 1464 1465unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 1466{ 1467 unsigned long m, x, b; 1468 u32 tsfs; 1469 1470 tsfs = I915_READ(TSFS); 1471 1472 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 1473 x = I915_READ8(TR1); 1474 1475 b = tsfs & TSFS_INTR_MASK; 1476 1477 return ((m * x) / 127) - b; 1478} 1479 1480static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 1481{ 1482 static const struct v_table { 1483 u16 vd; /* in .1 mil */ 1484 u16 vm; /* in .1 mil */ 1485 } v_table[] = { 1486 { 0, 0, }, 1487 { 375, 0, }, 1488 { 500, 0, }, 1489 { 625, 0, }, 1490 { 750, 0, }, 1491 { 875, 0, }, 1492 { 1000, 0, }, 1493 { 1125, 0, }, 1494 { 4125, 3000, }, 1495 { 4125, 3000, }, 1496 { 4125, 3000, }, 1497 { 4125, 3000, }, 1498 { 4125, 3000, }, 1499 { 4125, 3000, }, 1500 { 4125, 3000, }, 1501 { 4125, 3000, }, 1502 { 4125, 3000, }, 1503 { 4125, 3000, }, 1504 { 4125, 3000, }, 1505 { 4125, 3000, }, 1506 { 4125, 3000, }, 1507 { 4125, 3000, }, 1508 { 4125, 3000, }, 1509 { 4125, 3000, }, 1510 { 4125, 3000, }, 1511 { 4125, 3000, }, 1512 { 4125, 3000, }, 1513 { 4125, 
3000, }, 1514 { 4125, 3000, }, 1515 { 4125, 3000, }, 1516 { 4125, 3000, }, 1517 { 4125, 3000, }, 1518 { 4250, 3125, }, 1519 { 4375, 3250, }, 1520 { 4500, 3375, }, 1521 { 4625, 3500, }, 1522 { 4750, 3625, }, 1523 { 4875, 3750, }, 1524 { 5000, 3875, }, 1525 { 5125, 4000, }, 1526 { 5250, 4125, }, 1527 { 5375, 4250, }, 1528 { 5500, 4375, }, 1529 { 5625, 4500, }, 1530 { 5750, 4625, }, 1531 { 5875, 4750, }, 1532 { 6000, 4875, }, 1533 { 6125, 5000, }, 1534 { 6250, 5125, }, 1535 { 6375, 5250, }, 1536 { 6500, 5375, }, 1537 { 6625, 5500, }, 1538 { 6750, 5625, }, 1539 { 6875, 5750, }, 1540 { 7000, 5875, }, 1541 { 7125, 6000, }, 1542 { 7250, 6125, }, 1543 { 7375, 6250, }, 1544 { 7500, 6375, }, 1545 { 7625, 6500, }, 1546 { 7750, 6625, }, 1547 { 7875, 6750, }, 1548 { 8000, 6875, }, 1549 { 8125, 7000, }, 1550 { 8250, 7125, }, 1551 { 8375, 7250, }, 1552 { 8500, 7375, }, 1553 { 8625, 7500, }, 1554 { 8750, 7625, }, 1555 { 8875, 7750, }, 1556 { 9000, 7875, }, 1557 { 9125, 8000, }, 1558 { 9250, 8125, }, 1559 { 9375, 8250, }, 1560 { 9500, 8375, }, 1561 { 9625, 8500, }, 1562 { 9750, 8625, }, 1563 { 9875, 8750, }, 1564 { 10000, 8875, }, 1565 { 10125, 9000, }, 1566 { 10250, 9125, }, 1567 { 10375, 9250, }, 1568 { 10500, 9375, }, 1569 { 10625, 9500, }, 1570 { 10750, 9625, }, 1571 { 10875, 9750, }, 1572 { 11000, 9875, }, 1573 { 11125, 10000, }, 1574 { 11250, 10125, }, 1575 { 11375, 10250, }, 1576 { 11500, 10375, }, 1577 { 11625, 10500, }, 1578 { 11750, 10625, }, 1579 { 11875, 10750, }, 1580 { 12000, 10875, }, 1581 { 12125, 11000, }, 1582 { 12250, 11125, }, 1583 { 12375, 11250, }, 1584 { 12500, 11375, }, 1585 { 12625, 11500, }, 1586 { 12750, 11625, }, 1587 { 12875, 11750, }, 1588 { 13000, 11875, }, 1589 { 13125, 12000, }, 1590 { 13250, 12125, }, 1591 { 13375, 12250, }, 1592 { 13500, 12375, }, 1593 { 13625, 12500, }, 1594 { 13750, 12625, }, 1595 { 13875, 12750, }, 1596 { 14000, 12875, }, 1597 { 14125, 13000, }, 1598 { 14250, 13125, }, 1599 { 14375, 13250, }, 1600 { 14500, 13375, }, 1601 { 
14625, 13500, }, 1602 { 14750, 13625, }, 1603 { 14875, 13750, }, 1604 { 15000, 13875, }, 1605 { 15125, 14000, }, 1606 { 15250, 14125, }, 1607 { 15375, 14250, }, 1608 { 15500, 14375, }, 1609 { 15625, 14500, }, 1610 { 15750, 14625, }, 1611 { 15875, 14750, }, 1612 { 16000, 14875, }, 1613 { 16125, 15000, }, 1614 }; 1615 if (dev_priv->info->is_mobile) 1616 return v_table[pxvid].vm; 1617 else 1618 return v_table[pxvid].vd; 1619} 1620 1621void i915_update_gfx_val(struct drm_i915_private *dev_priv) 1622{ 1623 struct timespec now, diff1; 1624 u64 diff; 1625 unsigned long diffms; 1626 u32 count; 1627 1628 getrawmonotonic(&now); 1629 diff1 = timespec_sub(now, dev_priv->last_time2); 1630 1631 /* Don't divide by 0 */ 1632 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 1633 if (!diffms) 1634 return; 1635 1636 count = I915_READ(GFXEC); 1637 1638 if (count < dev_priv->last_count2) { 1639 diff = ~0UL - dev_priv->last_count2; 1640 diff += count; 1641 } else { 1642 diff = count - dev_priv->last_count2; 1643 } 1644 1645 dev_priv->last_count2 = count; 1646 dev_priv->last_time2 = now; 1647 1648 /* More magic constants... 
*/ 1649 diff = diff * 1181; 1650 diff = div_u64(diff, diffms * 10); 1651 dev_priv->gfx_power = diff; 1652} 1653 1654unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 1655{ 1656 unsigned long t, corr, state1, corr2, state2; 1657 u32 pxvid, ext_v; 1658 1659 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); 1660 pxvid = (pxvid >> 24) & 0x7f; 1661 ext_v = pvid_to_extvid(dev_priv, pxvid); 1662 1663 state1 = ext_v; 1664 1665 t = i915_mch_val(dev_priv); 1666 1667 /* Revel in the empirically derived constants */ 1668 1669 /* Correction factor in 1/100000 units */ 1670 if (t > 80) 1671 corr = ((t * 2349) + 135940); 1672 else if (t >= 50) 1673 corr = ((t * 964) + 29317); 1674 else /* < 50 */ 1675 corr = ((t * 301) + 1004); 1676 1677 corr = corr * ((150142 * state1) / 10000 - 78642); 1678 corr /= 100000; 1679 corr2 = (corr * dev_priv->corr); 1680 1681 state2 = (corr2 * state1) / 10000; 1682 state2 /= 100; /* convert to mW */ 1683 1684 i915_update_gfx_val(dev_priv); 1685 1686 return dev_priv->gfx_power + state2; 1687} 1688 1689/* Global for IPS driver to get at the current i915 device */ 1690static struct drm_i915_private *i915_mch_dev; 1691/* 1692 * Lock protecting IPS related data structures 1693 * - i915_mch_dev 1694 * - dev_priv->max_delay 1695 * - dev_priv->min_delay 1696 * - dev_priv->fmax 1697 * - dev_priv->gpu_busy 1698 */ 1699static DEFINE_SPINLOCK(mchdev_lock); 1700 1701/** 1702 * i915_read_mch_val - return value for IPS use 1703 * 1704 * Calculate and return a value for the IPS driver to use when deciding whether 1705 * we have thermal and power headroom to increase CPU or GPU power budget. 
1706 */ 1707unsigned long i915_read_mch_val(void) 1708{ 1709 struct drm_i915_private *dev_priv; 1710 unsigned long chipset_val, graphics_val, ret = 0; 1711 1712 spin_lock(&mchdev_lock); 1713 if (!i915_mch_dev) 1714 goto out_unlock; 1715 dev_priv = i915_mch_dev; 1716 1717 chipset_val = i915_chipset_val(dev_priv); 1718 graphics_val = i915_gfx_val(dev_priv); 1719 1720 ret = chipset_val + graphics_val; 1721 1722out_unlock: 1723 spin_unlock(&mchdev_lock); 1724 1725 return ret; 1726} 1727EXPORT_SYMBOL_GPL(i915_read_mch_val); 1728 1729/** 1730 * i915_gpu_raise - raise GPU frequency limit 1731 * 1732 * Raise the limit; IPS indicates we have thermal headroom. 1733 */ 1734bool i915_gpu_raise(void) 1735{ 1736 struct drm_i915_private *dev_priv; 1737 bool ret = true; 1738 1739 spin_lock(&mchdev_lock); 1740 if (!i915_mch_dev) { 1741 ret = false; 1742 goto out_unlock; 1743 } 1744 dev_priv = i915_mch_dev; 1745 1746 if (dev_priv->max_delay > dev_priv->fmax) 1747 dev_priv->max_delay--; 1748 1749out_unlock: 1750 spin_unlock(&mchdev_lock); 1751 1752 return ret; 1753} 1754EXPORT_SYMBOL_GPL(i915_gpu_raise); 1755 1756/** 1757 * i915_gpu_lower - lower GPU frequency limit 1758 * 1759 * IPS indicates we're close to a thermal limit, so throttle back the GPU 1760 * frequency maximum. 1761 */ 1762bool i915_gpu_lower(void) 1763{ 1764 struct drm_i915_private *dev_priv; 1765 bool ret = true; 1766 1767 spin_lock(&mchdev_lock); 1768 if (!i915_mch_dev) { 1769 ret = false; 1770 goto out_unlock; 1771 } 1772 dev_priv = i915_mch_dev; 1773 1774 if (dev_priv->max_delay < dev_priv->min_delay) 1775 dev_priv->max_delay++; 1776 1777out_unlock: 1778 spin_unlock(&mchdev_lock); 1779 1780 return ret; 1781} 1782EXPORT_SYMBOL_GPL(i915_gpu_lower); 1783 1784/** 1785 * i915_gpu_busy - indicate GPU business to IPS 1786 * 1787 * Tell the IPS driver whether or not the GPU is busy. 
1788 */ 1789bool i915_gpu_busy(void) 1790{ 1791 struct drm_i915_private *dev_priv; 1792 bool ret = false; 1793 1794 spin_lock(&mchdev_lock); 1795 if (!i915_mch_dev) 1796 goto out_unlock; 1797 dev_priv = i915_mch_dev; 1798 1799 ret = dev_priv->busy; 1800 1801out_unlock: 1802 spin_unlock(&mchdev_lock); 1803 1804 return ret; 1805} 1806EXPORT_SYMBOL_GPL(i915_gpu_busy); 1807 1808/** 1809 * i915_gpu_turbo_disable - disable graphics turbo 1810 * 1811 * Disable graphics turbo by resetting the max frequency and setting the 1812 * current frequency to the default. 1813 */ 1814bool i915_gpu_turbo_disable(void) 1815{ 1816 struct drm_i915_private *dev_priv; 1817 bool ret = true; 1818 1819 spin_lock(&mchdev_lock); 1820 if (!i915_mch_dev) { 1821 ret = false; 1822 goto out_unlock; 1823 } 1824 dev_priv = i915_mch_dev; 1825 1826 dev_priv->max_delay = dev_priv->fstart; 1827 1828 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) 1829 ret = false; 1830 1831out_unlock: 1832 spin_unlock(&mchdev_lock); 1833 1834 return ret; 1835} 1836EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 1837 1838/** 1839 * Tells the intel_ips driver that the i915 driver is now loaded, if 1840 * IPS got loaded first. 1841 * 1842 * This awkward dance is so that neither module has to depend on the 1843 * other in order for IPS to do the appropriate communication of 1844 * GPU turbo limits to i915. 
1845 */ 1846static void 1847ips_ping_for_i915_load(void) 1848{ 1849 void (*link)(void); 1850 1851 link = symbol_get(ips_link_to_i915_driver); 1852 if (link) { 1853 link(); 1854 symbol_put(ips_link_to_i915_driver); 1855 } 1856} 1857 1858/** 1859 * i915_driver_load - setup chip and create an initial config 1860 * @dev: DRM device 1861 * @flags: startup flags 1862 * 1863 * The driver load routine has to do several things: 1864 * - drive output discovery via intel_modeset_init() 1865 * - initialize the memory manager 1866 * - allocate initial config memory 1867 * - setup the DRM framebuffer with the allocated memory 1868 */ 1869int i915_driver_load(struct drm_device *dev, unsigned long flags) 1870{ 1871 struct drm_i915_private *dev_priv; 1872 int ret = 0, mmio_bar; 1873 uint32_t agp_size; 1874 1875 /* i915 has 4 more counters */ 1876 dev->counters += 4; 1877 dev->types[6] = _DRM_STAT_IRQ; 1878 dev->types[7] = _DRM_STAT_PRIMARY; 1879 dev->types[8] = _DRM_STAT_SECONDARY; 1880 dev->types[9] = _DRM_STAT_DMA; 1881 1882 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); 1883 if (dev_priv == NULL) 1884 return -ENOMEM; 1885 1886 dev->dev_private = (void *)dev_priv; 1887 dev_priv->dev = dev; 1888 dev_priv->info = (struct intel_device_info *) flags; 1889 1890 if (i915_get_bridge_dev(dev)) { 1891 ret = -EIO; 1892 goto free_priv; 1893 } 1894 1895 /* overlay on gen2 is broken and can't address above 1G */ 1896 if (IS_GEN2(dev)) 1897 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1898 1899 mmio_bar = IS_GEN2(dev) ? 
1 : 0; 1900 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); 1901 if (!dev_priv->regs) { 1902 DRM_ERROR("failed to map registers\n"); 1903 ret = -EIO; 1904 goto put_bridge; 1905 } 1906 1907 dev_priv->mm.gtt = intel_gtt_get(); 1908 if (!dev_priv->mm.gtt) { 1909 DRM_ERROR("Failed to initialize GTT\n"); 1910 ret = -ENODEV; 1911 goto out_iomapfree; 1912 } 1913 1914 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1915 1916 dev_priv->mm.gtt_mapping = 1917 io_mapping_create_wc(dev->agp->base, agp_size); 1918 if (dev_priv->mm.gtt_mapping == NULL) { 1919 ret = -EIO; 1920 goto out_rmmap; 1921 } 1922 1923 /* Set up a WC MTRR for non-PAT systems. This is more common than 1924 * one would think, because the kernel disables PAT on first 1925 * generation Core chips because WC PAT gets overridden by a UC 1926 * MTRR if present. Even if a UC MTRR isn't present. 1927 */ 1928 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, 1929 agp_size, 1930 MTRR_TYPE_WRCOMB, 1); 1931 if (dev_priv->mm.gtt_mtrr < 0) { 1932 DRM_INFO("MTRR allocation failed. Graphics " 1933 "performance may suffer.\n"); 1934 } 1935 1936 /* The i915 workqueue is primarily used for batched retirement of 1937 * requests (and thus managing bo) once the task has been completed 1938 * by the GPU. i915_gem_retire_requests() is called directly when we 1939 * need high-priority retirement, such as waiting for an explicit 1940 * bo. 1941 * 1942 * It is also used for periodic low-priority events, such as 1943 * idle-timers and recording error state. 1944 * 1945 * All tasks on the workqueue are expected to acquire the dev mutex 1946 * so there is no point in running more than one instance of the 1947 * workqueue at any time: max_active = 1 and NON_REENTRANT. 
1948 */ 1949 dev_priv->wq = alloc_workqueue("i915", 1950 WQ_UNBOUND | WQ_NON_REENTRANT, 1951 1); 1952 if (dev_priv->wq == NULL) { 1953 DRM_ERROR("Failed to create our workqueue.\n"); 1954 ret = -ENOMEM; 1955 goto out_iomapfree; 1956 } 1957 1958 /* enable GEM by default */ 1959 dev_priv->has_gem = 1; 1960 1961 if (dev_priv->has_gem == 0 && 1962 drm_core_check_feature(dev, DRIVER_MODESET)) { 1963 DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); 1964 ret = -ENODEV; 1965 goto out_workqueue_free; 1966 } 1967 1968 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1969 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1970 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { 1971 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 1972 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 1973 } 1974 1975 /* Try to make sure MCHBAR is enabled before poking at it */ 1976 intel_setup_mchbar(dev); 1977 intel_setup_gmbus(dev); 1978 intel_opregion_setup(dev); 1979 1980 /* Make sure the bios did its job and set up vital registers */ 1981 intel_setup_bios(dev); 1982 1983 i915_gem_load(dev); 1984 1985 /* Init HWS */ 1986 if (!I915_NEED_GFX_HWS(dev)) { 1987 ret = i915_init_phys_hws(dev); 1988 if (ret) 1989 goto out_gem_unload; 1990 } 1991 1992 if (IS_PINEVIEW(dev)) 1993 i915_pineview_get_mem_freq(dev); 1994 else if (IS_GEN5(dev)) 1995 i915_ironlake_get_mem_freq(dev); 1996 1997 /* On the 945G/GM, the chipset reports the MSI capability on the 1998 * integrated graphics even though the support isn't actually there 1999 * according to the published specs. It doesn't appear to function 2000 * correctly in testing on 945G. 2001 * This may be a side effect of MSI having been made available for PEG 2002 * and the registers being closely associated. 2003 * 2004 * According to chipset errata, on the 965GM, MSI interrupts may 2005 * be lost or delayed, but we use them anyways to avoid 2006 * stuck interrupts on some machines. 
2007 */ 2008 if (!IS_I945G(dev) && !IS_I945GM(dev)) 2009 pci_enable_msi(dev->pdev); 2010 2011 spin_lock_init(&dev_priv->irq_lock); 2012 spin_lock_init(&dev_priv->error_lock); 2013 dev_priv->trace_irq_seqno = 0; 2014 2015 ret = drm_vblank_init(dev, I915_NUM_PIPE); 2016 if (ret) 2017 goto out_gem_unload; 2018 2019 /* Start out suspended */ 2020 dev_priv->mm.suspended = 1; 2021 2022 intel_detect_pch(dev); 2023 2024 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2025 ret = i915_load_modeset_init(dev); 2026 if (ret < 0) { 2027 DRM_ERROR("failed to init modeset\n"); 2028 goto out_gem_unload; 2029 } 2030 } 2031 2032 /* Must be done after probing outputs */ 2033 intel_opregion_init(dev); 2034 acpi_video_register(); 2035 2036 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2037 (unsigned long) dev); 2038 2039 spin_lock(&mchdev_lock); 2040 i915_mch_dev = dev_priv; 2041 dev_priv->mchdev_lock = &mchdev_lock; 2042 spin_unlock(&mchdev_lock); 2043 2044 ips_ping_for_i915_load(); 2045 2046 return 0; 2047 2048out_gem_unload: 2049 if (dev->pdev->msi_enabled) 2050 pci_disable_msi(dev->pdev); 2051 2052 intel_teardown_gmbus(dev); 2053 intel_teardown_mchbar(dev); 2054out_workqueue_free: 2055 destroy_workqueue(dev_priv->wq); 2056out_iomapfree: 2057 io_mapping_free(dev_priv->mm.gtt_mapping); 2058out_rmmap: 2059 pci_iounmap(dev->pdev, dev_priv->regs); 2060put_bridge: 2061 pci_dev_put(dev_priv->bridge_dev); 2062free_priv: 2063 kfree(dev_priv); 2064 return ret; 2065} 2066 2067int i915_driver_unload(struct drm_device *dev) 2068{ 2069 struct drm_i915_private *dev_priv = dev->dev_private; 2070 int ret; 2071 2072 spin_lock(&mchdev_lock); 2073 i915_mch_dev = NULL; 2074 spin_unlock(&mchdev_lock); 2075 2076 if (dev_priv->mm.inactive_shrinker.shrink) 2077 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 2078 2079 mutex_lock(&dev->struct_mutex); 2080 ret = i915_gpu_idle(dev); 2081 if (ret) 2082 DRM_ERROR("failed to idle hardware: %d\n", ret); 2083 
mutex_unlock(&dev->struct_mutex); 2084 2085 /* Cancel the retire work handler, which should be idle now. */ 2086 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 2087 2088 io_mapping_free(dev_priv->mm.gtt_mapping); 2089 if (dev_priv->mm.gtt_mtrr >= 0) { 2090 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, 2091 dev->agp->agp_info.aper_size * 1024 * 1024); 2092 dev_priv->mm.gtt_mtrr = -1; 2093 } 2094 2095 acpi_video_unregister(); 2096 2097 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2098 intel_fbdev_fini(dev); 2099 intel_modeset_cleanup(dev); 2100 2101 /* 2102 * free the memory space allocated for the child device 2103 * config parsed from VBT 2104 */ 2105 if (dev_priv->child_dev && dev_priv->child_dev_num) { 2106 kfree(dev_priv->child_dev); 2107 dev_priv->child_dev = NULL; 2108 dev_priv->child_dev_num = 0; 2109 } 2110 2111 vga_switcheroo_unregister_client(dev->pdev); 2112 vga_client_register(dev->pdev, NULL, NULL, NULL); 2113 } 2114 2115 /* Free error state after interrupts are fully disabled. */ 2116 del_timer_sync(&dev_priv->hangcheck_timer); 2117 cancel_work_sync(&dev_priv->error_work); 2118 i915_destroy_error_state(dev); 2119 2120 if (dev->pdev->msi_enabled) 2121 pci_disable_msi(dev->pdev); 2122 2123 intel_opregion_fini(dev); 2124 2125 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2126 /* Flush any outstanding unpin_work. 
*/ 2127 flush_workqueue(dev_priv->wq); 2128 2129 i915_gem_free_all_phys_object(dev); 2130 2131 mutex_lock(&dev->struct_mutex); 2132 i915_gem_cleanup_ringbuffer(dev); 2133 mutex_unlock(&dev->struct_mutex); 2134 if (I915_HAS_FBC(dev) && i915_powersave) 2135 i915_cleanup_compression(dev); 2136 drm_mm_takedown(&dev_priv->mm.stolen); 2137 2138 intel_cleanup_overlay(dev); 2139 2140 if (!I915_NEED_GFX_HWS(dev)) 2141 i915_free_hws(dev); 2142 } 2143 2144 if (dev_priv->regs != NULL) 2145 pci_iounmap(dev->pdev, dev_priv->regs); 2146 2147 intel_teardown_gmbus(dev); 2148 intel_teardown_mchbar(dev); 2149 2150 destroy_workqueue(dev_priv->wq); 2151 2152 pci_dev_put(dev_priv->bridge_dev); 2153 kfree(dev->dev_private); 2154 2155 return 0; 2156} 2157 2158int i915_driver_open(struct drm_device *dev, struct drm_file *file) 2159{ 2160 struct drm_i915_file_private *file_priv; 2161 2162 DRM_DEBUG_DRIVER("\n"); 2163 file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); 2164 if (!file_priv) 2165 return -ENOMEM; 2166 2167 file->driver_priv = file_priv; 2168 2169 spin_lock_init(&file_priv->mm.lock); 2170 INIT_LIST_HEAD(&file_priv->mm.request_list); 2171 2172 return 0; 2173} 2174 2175/** 2176 * i915_driver_lastclose - clean up after all DRM clients have exited 2177 * @dev: DRM device 2178 * 2179 * Take care of cleaning up after all DRM clients have exited. In the 2180 * mode setting case, we want to restore the kernel's initial mode (just 2181 * in case the last client left us in a bad state). 2182 * 2183 * Additionally, in the non-mode setting case, we'll tear down the AGP 2184 * and DMA structures, since the kernel won't be using them, and clea 2185 * up any GEM state. 
2186 */ 2187void i915_driver_lastclose(struct drm_device * dev) 2188{ 2189 drm_i915_private_t *dev_priv = dev->dev_private; 2190 2191 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { 2192 drm_fb_helper_restore(); 2193 vga_switcheroo_process_delayed_switch(); 2194 return; 2195 } 2196 2197 i915_gem_lastclose(dev); 2198 2199 if (dev_priv->agp_heap) 2200 i915_mem_takedown(&(dev_priv->agp_heap)); 2201 2202 i915_dma_cleanup(dev); 2203} 2204 2205void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 2206{ 2207 drm_i915_private_t *dev_priv = dev->dev_private; 2208 i915_gem_release(dev, file_priv); 2209 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2210 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 2211} 2212 2213void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 2214{ 2215 struct drm_i915_file_private *file_priv = file->driver_priv; 2216 2217 kfree(file_priv); 2218} 2219 2220struct drm_ioctl_desc i915_ioctls[] = { 2221 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2222 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 2223 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), 2224 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), 2225 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), 2226 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), 2227 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), 2228 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2229 DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH), 2230 DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH), 2231 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2232 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 2233 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2234 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, 
i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2235 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), 2236 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 2237 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2238 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2239 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), 2240 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), 2241 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2242 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2243 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 2244 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), 2245 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2246 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2247 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), 2248 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), 2249 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), 2250 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), 2251 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), 2252 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), 2253 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), 2254 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), 2255 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), 2256 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), 2257 
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), 2258 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), 2259 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2260 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2261}; 2262 2263int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 2264 2265/** 2266 * Determine if the device really is AGP or not. 2267 * 2268 * All Intel graphics chipsets are treated as AGP, even if they are really 2269 * PCI-e. 2270 * 2271 * \param dev The device to be tested. 2272 * 2273 * \returns 2274 * A value of 1 is always retured to indictate every i9x5 is AGP. 2275 */ 2276int i915_driver_device_is_agp(struct drm_device * dev) 2277{ 2278 return 1; 2279} 2280