i915_dma.c revision 43a9539fa9e780f16c0d1e4bc91a2701f1ce178f
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- 2 */ 3/* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>

/*
 * Program the HWS_PGA register with the bus address of the DMA-coherent
 * hardware status page.  On gen4+ the bits shifted down from busaddr>>28
 * are OR'd into the low byte (NOTE(review): presumably the high address
 * bits for >4GiB support — confirm against the PRM).
 */
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 *
 * Allocates one page of DMA-coherent memory, points the low-priority ring's
 * status_page at it, zeroes it, and writes its bus address into HWS_PGA.
 * Returns 0 on success or -ENOMEM if the page could not be allocated.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr =
		(void __force __iomem *)dev_priv->status_page_dmah->vaddr;

	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);

	i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Physically-allocated status page (i915_init_phys_hws) */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* GTT-mapped status page set up via the HWS_ADDR ioctl */
	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

/*
 * Resynchronize the driver's software copy of the ring head/tail/space
 * with the hardware after userspace (the X server) has used the ring
 * behind the kernel's back.  Legacy (UMS) path only.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

/*
 * Tear down the legacy DMA state: disable interrupts, clean up all ring
 * buffers and release the hardware status page.  Always returns 0.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

/*
 * Legacy I915_INIT_DMA handler: locate the SAREA, optionally initialize
 * the render ring from userspace-supplied start/size, and record the
 * front/back buffer layout.  Returns 0 or a negative errno.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* A non-NULL obj means GEM already owns the ring; refuse. */
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}

/*
 * Legacy I915_RESUME_DMA handler: verify the ring mapping and hardware
 * status page are still present after resume and re-program HWS_PGA.
 */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
				ring->status_page.page_addr);
	/* gfx_addr != 0 means a GTT-based status page; otherwise physical */
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

/*
 * DRM_IOCTL_I915_INIT dispatcher: routes init/cleanup/resume sub-commands
 * to their handlers.  Returns -EINVAL for unknown func codes.
 */
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	/* Top 3 bits (31:29) select the command client/type. */
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		/* 3D commands with opcode <= 0x18 are fixed single-dword */
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

/*
 * Validate a userspace command buffer dword-by-dword with validate_cmd()
 * and, if every command is allowed, copy it into the low-priority ring
 * (padded with a NOOP to keep the tail qword-aligned).
 * Returns 0 or a negative errno (-EINVAL on oversize/illegal buffers).
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

/*
 * Emit a GFX_OP_DRAWRECT_INFO packet restricting rendering to the given
 * clip rectangle.  Rejects empty or negative boxes with -EINVAL.
 * Gen4+ uses the 4-dword I965 variant (no DR1); older parts emit the
 * 6-dword form.
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

/*
 * Bump the software breadcrumb counter (wrapping at 0x7FFFFFFF), mirror
 * it into the SAREA, and store it into the status page via
 * MI_STORE_DWORD_INDEX so completion can be observed.
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

/*
 * Dispatch a validated legacy command buffer, re-emitting it once per
 * clip rectangle (or once when there are none), then emit a breadcrumb.
 * Returns 0 or a negative errno.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	/* Command buffers must be dword-aligned. */
	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

/*
 * Dispatch a legacy (non-GEM) batch buffer once per clip rectangle.
 * i830/845G cannot use MI_BATCH_BUFFER_START and get the 4-dword
 * MI_BATCH_BUFFER form instead; G4X/gen5 additionally need an ISP
 * invalidate flush afterwards.  Returns 0 or a negative errno.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	/* Start address and length must be qword-aligned. */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}


	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

/*
 * Emit a legacy page flip between front and back buffers and record the
 * new current page in the SAREA.  Requires a SAREA (-EINVAL otherwise).
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			  __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	/* Toggle the display base between back and front buffers. */
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Store the breadcrumb into the status page (see i915_emit_breadcrumb). */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

/*
 * Resynchronize ring state and wait for the low-priority ring to drain.
 */
static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return intel_wait_ring_idle(ring);
}

/*
 * DRM_IOCTL_I915_FLUSH handler: wait for the ring to go idle under
 * struct_mutex.
 */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * DRM_IOCTL_I915_BATCHBUFFER handler: copy the user's cliprects in,
 * dispatch the batch under struct_mutex, and report the last breadcrumb
 * back through the SAREA.  Returns 0 or a negative errno.
 */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

/*
 * DRM_IOCTL_I915_CMDBUFFER handler: copy the command buffer and cliprects
 * from userspace, then validate and dispatch via i915_dispatch_cmdbuffer.
 * Returns 0 or a negative errno.
 */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

/*
 * DRM_IOCTL_I915_FLIP handler: perform a legacy page flip under
 * struct_mutex.
 */
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * DRM_IOCTL_I915_GETPARAM handler: report driver/hardware capabilities
 * to userspace, copying a single int back through param->value.
 */
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ?
 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	case I915_PARAM_HAS_BLT:
		value = HAS_BLT(dev);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

/*
 * DRM_IOCTL_I915_SETPARAM handler: set tunables from userspace (texture
 * LRU granularity, batchbuffer permission, reserved fence registers).
 */
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		/* Historical no-op, kept for ABI compatibility. */
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}

/*
 * DRM_IOCTL_I915_HWS_ADDR handler: point the hardware status page at a
 * GTT offset supplied by userspace (G33-class parts that need a GFX HWS).
 * Maps the page through the AGP aperture and programs HWS_PGA.
 * No-op (with a WARN) under KMS.  Returns 0 or a negative errno.
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr =
		(void __force __iomem *)dev_priv->hws_map.handle;
	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

/*
 * Cache a reference to the host bridge (device 0:0.0), needed for MCHBAR
 * and stolen-memory config-space accesses.  Returns 0 or -1 if absent.
 */
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev =
		pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	/* MCHBAR is a 64-bit register on gen4+; read the high dword too. */
	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	/* i915G/GM gate MCHBAR via the DEVEN register; others via bit 0
	 * of the MCHBAR register itself. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

/*
 * Undo intel_setup_mchbar(): clear the enable bit if we set it and
 * release any resource we allocated for the MCH register window.
 */
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)

/**
 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
 *                       a physical one
 * @dev: drm device
 * @offset: address to translate
 *
 * Some chip functions require allocations from stolen space and need the
 * physical address of the memory in question.
 */
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev_priv->bridge_dev;
	u32 base;

#if 0
	/* On the machines I have tested the Graphics Base of Stolen Memory
	 * is unreliable, so compute the base by subtracting the stolen memory
	 * from the Top of Low Usable DRAM which is where the BIOS places
	 * the graphics stolen memory.
	 */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		/* top 32bits are reserved = 0 */
		pci_read_config_dword(pdev, 0xA4, &base);
	} else {
		/* XXX presume 8xx is the same as i915 */
		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
	}
#else
	/* Active path: read TOLUD (0xb0 on gen4+/G33, 0x9c on older parts)
	 * from the bridge and subtract the stolen size to find the base. */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		u16 val;
		pci_read_config_word(pdev, 0xb0, &val);
		base = val >> 4 << 20;
	} else {
		u8 val;
		pci_read_config_byte(pdev, 0x9c, &val);
		base = val >> 3 << 27;
	}
	base -= dev_priv->mm.gtt->stolen_size;
#endif

	return base + offset;
}

/*
 * Log a hint when there is not enough stolen memory for the compressed
 * framebuffer (FBC), which is then disabled.
 */
static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}

/*
 * Carve an FBC compressed framebuffer (and, on pre-GM45/non-PCH parts,
 * a compressed line-length buffer) out of stolen memory and program the
 * hardware base registers.  On failure, FBC is marked unavailable.
 */
static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	/* Just in case the BIOS is doing something questionable.
	 */
	intel_disable_fbc(dev);

	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
	if (compressed_fb)
		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb)
		goto err;

	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
	if (!cfb_base)
		goto err_fb;

	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
						    4096, 4096, 0);
		if (compressed_llb)
			compressed_llb = drm_mm_get_block(compressed_llb,
							  4096, 4096);
		if (!compressed_llb)
			goto err_fb;

		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
		if (!ll_base)
			goto err_llb;
	}

	dev_priv->cfb_size = size;

	dev_priv->compressed_fb = compressed_fb;
	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
		      cfb_base, ll_base, size >> 20);
	return;

err_llb:
	drm_mm_put_block(compressed_llb);
err_fb:
	drm_mm_put_block(compressed_fb);
err:
	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
	i915_warn_stolen(dev);
}

/*
 * Return the stolen-memory blocks reserved by i915_setup_compression().
 */
static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (dev_priv->compressed_llb)
		drm_mm_put_block(dev_priv->compressed_llb);
}

/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/*
 * vga_switcheroo callback: power the GPU on (resume) or off (suspend)
 * when the mux switches between integrated and discrete graphics.
 */
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		printk(KERN_ERR "i915: switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/*
 * vga_switcheroo callback: only allow a GPU switch while no userspace
 * client holds the device open.
 */
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

/*
 * KMS-path GEM bring-up: initialize the stolen-memory allocator and the
 * GTT aperture split, start the ring buffers, and (optionally) reserve
 * a compressed buffer for FBC.  Returns 0 or a negative errno.
 */
static int i915_load_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long prealloc_size, gtt_size, mappable_size;
	int ret;

	prealloc_size = dev_priv->mm.gtt->stolen_size;
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture. One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Leave 1M for line length buffer & misc. */

		/* Try to get a 32M buffer... */
		if (prealloc_size > (36*1024*1024))
			cfb_size = 32*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise. */
	dev_priv->allow_batchbuffer = 1;
	return 0;
}

/*
 * Full KMS-mode driver load: parse the VBT, register with the VGA
 * arbiter and vga_switcheroo, bring up modesetting, GEM, interrupts and
 * fbdev.  Unwinds everything in reverse order on failure.
 */
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     NULL,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = i915_load_gem_init(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

/*
 * Allocate per-master private state (SAREA bookkeeping) when a new DRM
 * master is created.  Returns 0 or -ENOMEM.
 */
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

/* NOTE(review): the remainder of this function is truncated in this
 * copy of the file — body continues beyond the visible source. */
void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if
(!master_priv) 1321 return; 1322 1323 kfree(master_priv); 1324 1325 master->driver_priv = NULL; 1326} 1327 1328static void i915_pineview_get_mem_freq(struct drm_device *dev) 1329{ 1330 drm_i915_private_t *dev_priv = dev->dev_private; 1331 u32 tmp; 1332 1333 tmp = I915_READ(CLKCFG); 1334 1335 switch (tmp & CLKCFG_FSB_MASK) { 1336 case CLKCFG_FSB_533: 1337 dev_priv->fsb_freq = 533; /* 133*4 */ 1338 break; 1339 case CLKCFG_FSB_800: 1340 dev_priv->fsb_freq = 800; /* 200*4 */ 1341 break; 1342 case CLKCFG_FSB_667: 1343 dev_priv->fsb_freq = 667; /* 167*4 */ 1344 break; 1345 case CLKCFG_FSB_400: 1346 dev_priv->fsb_freq = 400; /* 100*4 */ 1347 break; 1348 } 1349 1350 switch (tmp & CLKCFG_MEM_MASK) { 1351 case CLKCFG_MEM_533: 1352 dev_priv->mem_freq = 533; 1353 break; 1354 case CLKCFG_MEM_667: 1355 dev_priv->mem_freq = 667; 1356 break; 1357 case CLKCFG_MEM_800: 1358 dev_priv->mem_freq = 800; 1359 break; 1360 } 1361 1362 /* detect pineview DDR3 setting */ 1363 tmp = I915_READ(CSHRDDR3CTL); 1364 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 
1 : 0; 1365} 1366 1367static void i915_ironlake_get_mem_freq(struct drm_device *dev) 1368{ 1369 drm_i915_private_t *dev_priv = dev->dev_private; 1370 u16 ddrpll, csipll; 1371 1372 ddrpll = I915_READ16(DDRMPLL1); 1373 csipll = I915_READ16(CSIPLL0); 1374 1375 switch (ddrpll & 0xff) { 1376 case 0xc: 1377 dev_priv->mem_freq = 800; 1378 break; 1379 case 0x10: 1380 dev_priv->mem_freq = 1066; 1381 break; 1382 case 0x14: 1383 dev_priv->mem_freq = 1333; 1384 break; 1385 case 0x18: 1386 dev_priv->mem_freq = 1600; 1387 break; 1388 default: 1389 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", 1390 ddrpll & 0xff); 1391 dev_priv->mem_freq = 0; 1392 break; 1393 } 1394 1395 dev_priv->r_t = dev_priv->mem_freq; 1396 1397 switch (csipll & 0x3ff) { 1398 case 0x00c: 1399 dev_priv->fsb_freq = 3200; 1400 break; 1401 case 0x00e: 1402 dev_priv->fsb_freq = 3733; 1403 break; 1404 case 0x010: 1405 dev_priv->fsb_freq = 4266; 1406 break; 1407 case 0x012: 1408 dev_priv->fsb_freq = 4800; 1409 break; 1410 case 0x014: 1411 dev_priv->fsb_freq = 5333; 1412 break; 1413 case 0x016: 1414 dev_priv->fsb_freq = 5866; 1415 break; 1416 case 0x018: 1417 dev_priv->fsb_freq = 6400; 1418 break; 1419 default: 1420 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", 1421 csipll & 0x3ff); 1422 dev_priv->fsb_freq = 0; 1423 break; 1424 } 1425 1426 if (dev_priv->fsb_freq == 3200) { 1427 dev_priv->c_m = 0; 1428 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { 1429 dev_priv->c_m = 1; 1430 } else { 1431 dev_priv->c_m = 2; 1432 } 1433} 1434 1435static const struct cparams { 1436 u16 i; 1437 u16 t; 1438 u16 m; 1439 u16 c; 1440} cparams[] = { 1441 { 1, 1333, 301, 28664 }, 1442 { 1, 1066, 294, 24460 }, 1443 { 1, 800, 294, 25192 }, 1444 { 0, 1333, 276, 27605 }, 1445 { 0, 1066, 276, 27605 }, 1446 { 0, 800, 231, 23784 }, 1447}; 1448 1449unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 1450{ 1451 u64 total_count, diff, ret; 1452 u32 count1, count2, count3, m = 0, c = 0; 1453 unsigned 
long now = jiffies_to_msecs(jiffies), diff1; 1454 int i; 1455 1456 diff1 = now - dev_priv->last_time1; 1457 1458 count1 = I915_READ(DMIEC); 1459 count2 = I915_READ(DDREC); 1460 count3 = I915_READ(CSIEC); 1461 1462 total_count = count1 + count2 + count3; 1463 1464 /* FIXME: handle per-counter overflow */ 1465 if (total_count < dev_priv->last_count1) { 1466 diff = ~0UL - dev_priv->last_count1; 1467 diff += total_count; 1468 } else { 1469 diff = total_count - dev_priv->last_count1; 1470 } 1471 1472 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 1473 if (cparams[i].i == dev_priv->c_m && 1474 cparams[i].t == dev_priv->r_t) { 1475 m = cparams[i].m; 1476 c = cparams[i].c; 1477 break; 1478 } 1479 } 1480 1481 diff = div_u64(diff, diff1); 1482 ret = ((m * diff) + c); 1483 ret = div_u64(ret, 10); 1484 1485 dev_priv->last_count1 = total_count; 1486 dev_priv->last_time1 = now; 1487 1488 return ret; 1489} 1490 1491unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 1492{ 1493 unsigned long m, x, b; 1494 u32 tsfs; 1495 1496 tsfs = I915_READ(TSFS); 1497 1498 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 1499 x = I915_READ8(TR1); 1500 1501 b = tsfs & TSFS_INTR_MASK; 1502 1503 return ((m * x) / 127) - b; 1504} 1505 1506static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 1507{ 1508 static const struct v_table { 1509 u16 vd; /* in .1 mil */ 1510 u16 vm; /* in .1 mil */ 1511 } v_table[] = { 1512 { 0, 0, }, 1513 { 375, 0, }, 1514 { 500, 0, }, 1515 { 625, 0, }, 1516 { 750, 0, }, 1517 { 875, 0, }, 1518 { 1000, 0, }, 1519 { 1125, 0, }, 1520 { 4125, 3000, }, 1521 { 4125, 3000, }, 1522 { 4125, 3000, }, 1523 { 4125, 3000, }, 1524 { 4125, 3000, }, 1525 { 4125, 3000, }, 1526 { 4125, 3000, }, 1527 { 4125, 3000, }, 1528 { 4125, 3000, }, 1529 { 4125, 3000, }, 1530 { 4125, 3000, }, 1531 { 4125, 3000, }, 1532 { 4125, 3000, }, 1533 { 4125, 3000, }, 1534 { 4125, 3000, }, 1535 { 4125, 3000, }, 1536 { 4125, 3000, }, 1537 { 4125, 3000, }, 1538 { 4125, 3000, }, 1539 { 4125, 
3000, }, 1540 { 4125, 3000, }, 1541 { 4125, 3000, }, 1542 { 4125, 3000, }, 1543 { 4125, 3000, }, 1544 { 4250, 3125, }, 1545 { 4375, 3250, }, 1546 { 4500, 3375, }, 1547 { 4625, 3500, }, 1548 { 4750, 3625, }, 1549 { 4875, 3750, }, 1550 { 5000, 3875, }, 1551 { 5125, 4000, }, 1552 { 5250, 4125, }, 1553 { 5375, 4250, }, 1554 { 5500, 4375, }, 1555 { 5625, 4500, }, 1556 { 5750, 4625, }, 1557 { 5875, 4750, }, 1558 { 6000, 4875, }, 1559 { 6125, 5000, }, 1560 { 6250, 5125, }, 1561 { 6375, 5250, }, 1562 { 6500, 5375, }, 1563 { 6625, 5500, }, 1564 { 6750, 5625, }, 1565 { 6875, 5750, }, 1566 { 7000, 5875, }, 1567 { 7125, 6000, }, 1568 { 7250, 6125, }, 1569 { 7375, 6250, }, 1570 { 7500, 6375, }, 1571 { 7625, 6500, }, 1572 { 7750, 6625, }, 1573 { 7875, 6750, }, 1574 { 8000, 6875, }, 1575 { 8125, 7000, }, 1576 { 8250, 7125, }, 1577 { 8375, 7250, }, 1578 { 8500, 7375, }, 1579 { 8625, 7500, }, 1580 { 8750, 7625, }, 1581 { 8875, 7750, }, 1582 { 9000, 7875, }, 1583 { 9125, 8000, }, 1584 { 9250, 8125, }, 1585 { 9375, 8250, }, 1586 { 9500, 8375, }, 1587 { 9625, 8500, }, 1588 { 9750, 8625, }, 1589 { 9875, 8750, }, 1590 { 10000, 8875, }, 1591 { 10125, 9000, }, 1592 { 10250, 9125, }, 1593 { 10375, 9250, }, 1594 { 10500, 9375, }, 1595 { 10625, 9500, }, 1596 { 10750, 9625, }, 1597 { 10875, 9750, }, 1598 { 11000, 9875, }, 1599 { 11125, 10000, }, 1600 { 11250, 10125, }, 1601 { 11375, 10250, }, 1602 { 11500, 10375, }, 1603 { 11625, 10500, }, 1604 { 11750, 10625, }, 1605 { 11875, 10750, }, 1606 { 12000, 10875, }, 1607 { 12125, 11000, }, 1608 { 12250, 11125, }, 1609 { 12375, 11250, }, 1610 { 12500, 11375, }, 1611 { 12625, 11500, }, 1612 { 12750, 11625, }, 1613 { 12875, 11750, }, 1614 { 13000, 11875, }, 1615 { 13125, 12000, }, 1616 { 13250, 12125, }, 1617 { 13375, 12250, }, 1618 { 13500, 12375, }, 1619 { 13625, 12500, }, 1620 { 13750, 12625, }, 1621 { 13875, 12750, }, 1622 { 14000, 12875, }, 1623 { 14125, 13000, }, 1624 { 14250, 13125, }, 1625 { 14375, 13250, }, 1626 { 14500, 13375, }, 1627 { 
14625, 13500, }, 1628 { 14750, 13625, }, 1629 { 14875, 13750, }, 1630 { 15000, 13875, }, 1631 { 15125, 14000, }, 1632 { 15250, 14125, }, 1633 { 15375, 14250, }, 1634 { 15500, 14375, }, 1635 { 15625, 14500, }, 1636 { 15750, 14625, }, 1637 { 15875, 14750, }, 1638 { 16000, 14875, }, 1639 { 16125, 15000, }, 1640 }; 1641 if (dev_priv->info->is_mobile) 1642 return v_table[pxvid].vm; 1643 else 1644 return v_table[pxvid].vd; 1645} 1646 1647void i915_update_gfx_val(struct drm_i915_private *dev_priv) 1648{ 1649 struct timespec now, diff1; 1650 u64 diff; 1651 unsigned long diffms; 1652 u32 count; 1653 1654 getrawmonotonic(&now); 1655 diff1 = timespec_sub(now, dev_priv->last_time2); 1656 1657 /* Don't divide by 0 */ 1658 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 1659 if (!diffms) 1660 return; 1661 1662 count = I915_READ(GFXEC); 1663 1664 if (count < dev_priv->last_count2) { 1665 diff = ~0UL - dev_priv->last_count2; 1666 diff += count; 1667 } else { 1668 diff = count - dev_priv->last_count2; 1669 } 1670 1671 dev_priv->last_count2 = count; 1672 dev_priv->last_time2 = now; 1673 1674 /* More magic constants... 
*/ 1675 diff = diff * 1181; 1676 diff = div_u64(diff, diffms * 10); 1677 dev_priv->gfx_power = diff; 1678} 1679 1680unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 1681{ 1682 unsigned long t, corr, state1, corr2, state2; 1683 u32 pxvid, ext_v; 1684 1685 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); 1686 pxvid = (pxvid >> 24) & 0x7f; 1687 ext_v = pvid_to_extvid(dev_priv, pxvid); 1688 1689 state1 = ext_v; 1690 1691 t = i915_mch_val(dev_priv); 1692 1693 /* Revel in the empirically derived constants */ 1694 1695 /* Correction factor in 1/100000 units */ 1696 if (t > 80) 1697 corr = ((t * 2349) + 135940); 1698 else if (t >= 50) 1699 corr = ((t * 964) + 29317); 1700 else /* < 50 */ 1701 corr = ((t * 301) + 1004); 1702 1703 corr = corr * ((150142 * state1) / 10000 - 78642); 1704 corr /= 100000; 1705 corr2 = (corr * dev_priv->corr); 1706 1707 state2 = (corr2 * state1) / 10000; 1708 state2 /= 100; /* convert to mW */ 1709 1710 i915_update_gfx_val(dev_priv); 1711 1712 return dev_priv->gfx_power + state2; 1713} 1714 1715/* Global for IPS driver to get at the current i915 device */ 1716static struct drm_i915_private *i915_mch_dev; 1717/* 1718 * Lock protecting IPS related data structures 1719 * - i915_mch_dev 1720 * - dev_priv->max_delay 1721 * - dev_priv->min_delay 1722 * - dev_priv->fmax 1723 * - dev_priv->gpu_busy 1724 */ 1725static DEFINE_SPINLOCK(mchdev_lock); 1726 1727/** 1728 * i915_read_mch_val - return value for IPS use 1729 * 1730 * Calculate and return a value for the IPS driver to use when deciding whether 1731 * we have thermal and power headroom to increase CPU or GPU power budget. 
1732 */ 1733unsigned long i915_read_mch_val(void) 1734{ 1735 struct drm_i915_private *dev_priv; 1736 unsigned long chipset_val, graphics_val, ret = 0; 1737 1738 spin_lock(&mchdev_lock); 1739 if (!i915_mch_dev) 1740 goto out_unlock; 1741 dev_priv = i915_mch_dev; 1742 1743 chipset_val = i915_chipset_val(dev_priv); 1744 graphics_val = i915_gfx_val(dev_priv); 1745 1746 ret = chipset_val + graphics_val; 1747 1748out_unlock: 1749 spin_unlock(&mchdev_lock); 1750 1751 return ret; 1752} 1753EXPORT_SYMBOL_GPL(i915_read_mch_val); 1754 1755/** 1756 * i915_gpu_raise - raise GPU frequency limit 1757 * 1758 * Raise the limit; IPS indicates we have thermal headroom. 1759 */ 1760bool i915_gpu_raise(void) 1761{ 1762 struct drm_i915_private *dev_priv; 1763 bool ret = true; 1764 1765 spin_lock(&mchdev_lock); 1766 if (!i915_mch_dev) { 1767 ret = false; 1768 goto out_unlock; 1769 } 1770 dev_priv = i915_mch_dev; 1771 1772 if (dev_priv->max_delay > dev_priv->fmax) 1773 dev_priv->max_delay--; 1774 1775out_unlock: 1776 spin_unlock(&mchdev_lock); 1777 1778 return ret; 1779} 1780EXPORT_SYMBOL_GPL(i915_gpu_raise); 1781 1782/** 1783 * i915_gpu_lower - lower GPU frequency limit 1784 * 1785 * IPS indicates we're close to a thermal limit, so throttle back the GPU 1786 * frequency maximum. 1787 */ 1788bool i915_gpu_lower(void) 1789{ 1790 struct drm_i915_private *dev_priv; 1791 bool ret = true; 1792 1793 spin_lock(&mchdev_lock); 1794 if (!i915_mch_dev) { 1795 ret = false; 1796 goto out_unlock; 1797 } 1798 dev_priv = i915_mch_dev; 1799 1800 if (dev_priv->max_delay < dev_priv->min_delay) 1801 dev_priv->max_delay++; 1802 1803out_unlock: 1804 spin_unlock(&mchdev_lock); 1805 1806 return ret; 1807} 1808EXPORT_SYMBOL_GPL(i915_gpu_lower); 1809 1810/** 1811 * i915_gpu_busy - indicate GPU business to IPS 1812 * 1813 * Tell the IPS driver whether or not the GPU is busy. 
1814 */ 1815bool i915_gpu_busy(void) 1816{ 1817 struct drm_i915_private *dev_priv; 1818 bool ret = false; 1819 1820 spin_lock(&mchdev_lock); 1821 if (!i915_mch_dev) 1822 goto out_unlock; 1823 dev_priv = i915_mch_dev; 1824 1825 ret = dev_priv->busy; 1826 1827out_unlock: 1828 spin_unlock(&mchdev_lock); 1829 1830 return ret; 1831} 1832EXPORT_SYMBOL_GPL(i915_gpu_busy); 1833 1834/** 1835 * i915_gpu_turbo_disable - disable graphics turbo 1836 * 1837 * Disable graphics turbo by resetting the max frequency and setting the 1838 * current frequency to the default. 1839 */ 1840bool i915_gpu_turbo_disable(void) 1841{ 1842 struct drm_i915_private *dev_priv; 1843 bool ret = true; 1844 1845 spin_lock(&mchdev_lock); 1846 if (!i915_mch_dev) { 1847 ret = false; 1848 goto out_unlock; 1849 } 1850 dev_priv = i915_mch_dev; 1851 1852 dev_priv->max_delay = dev_priv->fstart; 1853 1854 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) 1855 ret = false; 1856 1857out_unlock: 1858 spin_unlock(&mchdev_lock); 1859 1860 return ret; 1861} 1862EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 1863 1864/** 1865 * Tells the intel_ips driver that the i915 driver is now loaded, if 1866 * IPS got loaded first. 1867 * 1868 * This awkward dance is so that neither module has to depend on the 1869 * other in order for IPS to do the appropriate communication of 1870 * GPU turbo limits to i915. 
1871 */ 1872static void 1873ips_ping_for_i915_load(void) 1874{ 1875 void (*link)(void); 1876 1877 link = symbol_get(ips_link_to_i915_driver); 1878 if (link) { 1879 link(); 1880 symbol_put(ips_link_to_i915_driver); 1881 } 1882} 1883 1884/** 1885 * i915_driver_load - setup chip and create an initial config 1886 * @dev: DRM device 1887 * @flags: startup flags 1888 * 1889 * The driver load routine has to do several things: 1890 * - drive output discovery via intel_modeset_init() 1891 * - initialize the memory manager 1892 * - allocate initial config memory 1893 * - setup the DRM framebuffer with the allocated memory 1894 */ 1895int i915_driver_load(struct drm_device *dev, unsigned long flags) 1896{ 1897 struct drm_i915_private *dev_priv; 1898 int ret = 0, mmio_bar; 1899 uint32_t agp_size; 1900 1901 /* i915 has 4 more counters */ 1902 dev->counters += 4; 1903 dev->types[6] = _DRM_STAT_IRQ; 1904 dev->types[7] = _DRM_STAT_PRIMARY; 1905 dev->types[8] = _DRM_STAT_SECONDARY; 1906 dev->types[9] = _DRM_STAT_DMA; 1907 1908 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); 1909 if (dev_priv == NULL) 1910 return -ENOMEM; 1911 1912 dev->dev_private = (void *)dev_priv; 1913 dev_priv->dev = dev; 1914 dev_priv->info = (struct intel_device_info *) flags; 1915 1916 if (i915_get_bridge_dev(dev)) { 1917 ret = -EIO; 1918 goto free_priv; 1919 } 1920 1921 /* overlay on gen2 is broken and can't address above 1G */ 1922 if (IS_GEN2(dev)) 1923 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1924 1925 /* 965GM sometimes incorrectly writes to hardware status page (HWS) 1926 * using 32bit addressing, overwriting memory if HWS is located 1927 * above 4GB. 1928 * 1929 * The documentation also mentions an issue with undefined 1930 * behaviour if any general state is accessed within a page above 4GB, 1931 * which also needs to be handled carefully. 
1932 */ 1933 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1934 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1935 1936 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1937 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); 1938 if (!dev_priv->regs) { 1939 DRM_ERROR("failed to map registers\n"); 1940 ret = -EIO; 1941 goto put_bridge; 1942 } 1943 1944 dev_priv->mm.gtt = intel_gtt_get(); 1945 if (!dev_priv->mm.gtt) { 1946 DRM_ERROR("Failed to initialize GTT\n"); 1947 ret = -ENODEV; 1948 goto out_iomapfree; 1949 } 1950 1951 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1952 1953 dev_priv->mm.gtt_mapping = 1954 io_mapping_create_wc(dev->agp->base, agp_size); 1955 if (dev_priv->mm.gtt_mapping == NULL) { 1956 ret = -EIO; 1957 goto out_rmmap; 1958 } 1959 1960 /* Set up a WC MTRR for non-PAT systems. This is more common than 1961 * one would think, because the kernel disables PAT on first 1962 * generation Core chips because WC PAT gets overridden by a UC 1963 * MTRR if present. Even if a UC MTRR isn't present. 1964 */ 1965 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, 1966 agp_size, 1967 MTRR_TYPE_WRCOMB, 1); 1968 if (dev_priv->mm.gtt_mtrr < 0) { 1969 DRM_INFO("MTRR allocation failed. Graphics " 1970 "performance may suffer.\n"); 1971 } 1972 1973 /* The i915 workqueue is primarily used for batched retirement of 1974 * requests (and thus managing bo) once the task has been completed 1975 * by the GPU. i915_gem_retire_requests() is called directly when we 1976 * need high-priority retirement, such as waiting for an explicit 1977 * bo. 1978 * 1979 * It is also used for periodic low-priority events, such as 1980 * idle-timers and recording error state. 1981 * 1982 * All tasks on the workqueue are expected to acquire the dev mutex 1983 * so there is no point in running more than one instance of the 1984 * workqueue at any time: max_active = 1 and NON_REENTRANT. 
1985 */ 1986 dev_priv->wq = alloc_workqueue("i915", 1987 WQ_UNBOUND | WQ_NON_REENTRANT, 1988 1); 1989 if (dev_priv->wq == NULL) { 1990 DRM_ERROR("Failed to create our workqueue.\n"); 1991 ret = -ENOMEM; 1992 goto out_iomapfree; 1993 } 1994 1995 /* enable GEM by default */ 1996 dev_priv->has_gem = 1; 1997 1998 intel_irq_init(dev); 1999 2000 /* Try to make sure MCHBAR is enabled before poking at it */ 2001 intel_setup_mchbar(dev); 2002 intel_setup_gmbus(dev); 2003 intel_opregion_setup(dev); 2004 2005 /* Make sure the bios did its job and set up vital registers */ 2006 intel_setup_bios(dev); 2007 2008 i915_gem_load(dev); 2009 2010 /* Init HWS */ 2011 if (!I915_NEED_GFX_HWS(dev)) { 2012 ret = i915_init_phys_hws(dev); 2013 if (ret) 2014 goto out_gem_unload; 2015 } 2016 2017 if (IS_PINEVIEW(dev)) 2018 i915_pineview_get_mem_freq(dev); 2019 else if (IS_GEN5(dev)) 2020 i915_ironlake_get_mem_freq(dev); 2021 2022 /* On the 945G/GM, the chipset reports the MSI capability on the 2023 * integrated graphics even though the support isn't actually there 2024 * according to the published specs. It doesn't appear to function 2025 * correctly in testing on 945G. 2026 * This may be a side effect of MSI having been made available for PEG 2027 * and the registers being closely associated. 2028 * 2029 * According to chipset errata, on the 965GM, MSI interrupts may 2030 * be lost or delayed, but we use them anyways to avoid 2031 * stuck interrupts on some machines. 
2032 */ 2033 if (!IS_I945G(dev) && !IS_I945GM(dev)) 2034 pci_enable_msi(dev->pdev); 2035 2036 spin_lock_init(&dev_priv->irq_lock); 2037 spin_lock_init(&dev_priv->error_lock); 2038 spin_lock_init(&dev_priv->rps_lock); 2039 2040 if (IS_MOBILE(dev) || !IS_GEN2(dev)) 2041 dev_priv->num_pipe = 2; 2042 else 2043 dev_priv->num_pipe = 1; 2044 2045 ret = drm_vblank_init(dev, dev_priv->num_pipe); 2046 if (ret) 2047 goto out_gem_unload; 2048 2049 /* Start out suspended */ 2050 dev_priv->mm.suspended = 1; 2051 2052 intel_detect_pch(dev); 2053 2054 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2055 ret = i915_load_modeset_init(dev); 2056 if (ret < 0) { 2057 DRM_ERROR("failed to init modeset\n"); 2058 goto out_gem_unload; 2059 } 2060 } 2061 2062 /* Must be done after probing outputs */ 2063 intel_opregion_init(dev); 2064 acpi_video_register(); 2065 2066 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2067 (unsigned long) dev); 2068 2069 spin_lock(&mchdev_lock); 2070 i915_mch_dev = dev_priv; 2071 dev_priv->mchdev_lock = &mchdev_lock; 2072 spin_unlock(&mchdev_lock); 2073 2074 ips_ping_for_i915_load(); 2075 2076 return 0; 2077 2078out_gem_unload: 2079 if (dev->pdev->msi_enabled) 2080 pci_disable_msi(dev->pdev); 2081 2082 intel_teardown_gmbus(dev); 2083 intel_teardown_mchbar(dev); 2084 destroy_workqueue(dev_priv->wq); 2085out_iomapfree: 2086 io_mapping_free(dev_priv->mm.gtt_mapping); 2087out_rmmap: 2088 pci_iounmap(dev->pdev, dev_priv->regs); 2089put_bridge: 2090 pci_dev_put(dev_priv->bridge_dev); 2091free_priv: 2092 kfree(dev_priv); 2093 return ret; 2094} 2095 2096int i915_driver_unload(struct drm_device *dev) 2097{ 2098 struct drm_i915_private *dev_priv = dev->dev_private; 2099 int ret; 2100 2101 spin_lock(&mchdev_lock); 2102 i915_mch_dev = NULL; 2103 spin_unlock(&mchdev_lock); 2104 2105 if (dev_priv->mm.inactive_shrinker.shrink) 2106 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 2107 2108 mutex_lock(&dev->struct_mutex); 2109 ret = i915_gpu_idle(dev); 
2110 if (ret) 2111 DRM_ERROR("failed to idle hardware: %d\n", ret); 2112 mutex_unlock(&dev->struct_mutex); 2113 2114 /* Cancel the retire work handler, which should be idle now. */ 2115 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 2116 2117 io_mapping_free(dev_priv->mm.gtt_mapping); 2118 if (dev_priv->mm.gtt_mtrr >= 0) { 2119 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, 2120 dev->agp->agp_info.aper_size * 1024 * 1024); 2121 dev_priv->mm.gtt_mtrr = -1; 2122 } 2123 2124 acpi_video_unregister(); 2125 2126 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2127 intel_fbdev_fini(dev); 2128 intel_modeset_cleanup(dev); 2129 2130 /* 2131 * free the memory space allocated for the child device 2132 * config parsed from VBT 2133 */ 2134 if (dev_priv->child_dev && dev_priv->child_dev_num) { 2135 kfree(dev_priv->child_dev); 2136 dev_priv->child_dev = NULL; 2137 dev_priv->child_dev_num = 0; 2138 } 2139 2140 vga_switcheroo_unregister_client(dev->pdev); 2141 vga_client_register(dev->pdev, NULL, NULL, NULL); 2142 } 2143 2144 /* Free error state after interrupts are fully disabled. */ 2145 del_timer_sync(&dev_priv->hangcheck_timer); 2146 cancel_work_sync(&dev_priv->error_work); 2147 i915_destroy_error_state(dev); 2148 2149 if (dev->pdev->msi_enabled) 2150 pci_disable_msi(dev->pdev); 2151 2152 intel_opregion_fini(dev); 2153 2154 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2155 /* Flush any outstanding unpin_work. 
*/ 2156 flush_workqueue(dev_priv->wq); 2157 2158 mutex_lock(&dev->struct_mutex); 2159 i915_gem_free_all_phys_object(dev); 2160 i915_gem_cleanup_ringbuffer(dev); 2161 mutex_unlock(&dev->struct_mutex); 2162 if (I915_HAS_FBC(dev) && i915_powersave) 2163 i915_cleanup_compression(dev); 2164 drm_mm_takedown(&dev_priv->mm.stolen); 2165 2166 intel_cleanup_overlay(dev); 2167 2168 if (!I915_NEED_GFX_HWS(dev)) 2169 i915_free_hws(dev); 2170 } 2171 2172 if (dev_priv->regs != NULL) 2173 pci_iounmap(dev->pdev, dev_priv->regs); 2174 2175 intel_teardown_gmbus(dev); 2176 intel_teardown_mchbar(dev); 2177 2178 destroy_workqueue(dev_priv->wq); 2179 2180 pci_dev_put(dev_priv->bridge_dev); 2181 kfree(dev->dev_private); 2182 2183 return 0; 2184} 2185 2186int i915_driver_open(struct drm_device *dev, struct drm_file *file) 2187{ 2188 struct drm_i915_file_private *file_priv; 2189 2190 DRM_DEBUG_DRIVER("\n"); 2191 file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); 2192 if (!file_priv) 2193 return -ENOMEM; 2194 2195 file->driver_priv = file_priv; 2196 2197 spin_lock_init(&file_priv->mm.lock); 2198 INIT_LIST_HEAD(&file_priv->mm.request_list); 2199 2200 return 0; 2201} 2202 2203/** 2204 * i915_driver_lastclose - clean up after all DRM clients have exited 2205 * @dev: DRM device 2206 * 2207 * Take care of cleaning up after all DRM clients have exited. In the 2208 * mode setting case, we want to restore the kernel's initial mode (just 2209 * in case the last client left us in a bad state). 2210 * 2211 * Additionally, in the non-mode setting case, we'll tear down the AGP 2212 * and DMA structures, since the kernel won't be using them, and clea 2213 * up any GEM state. 
2214 */ 2215void i915_driver_lastclose(struct drm_device * dev) 2216{ 2217 drm_i915_private_t *dev_priv = dev->dev_private; 2218 2219 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { 2220 intel_fb_restore_mode(dev); 2221 vga_switcheroo_process_delayed_switch(); 2222 return; 2223 } 2224 2225 i915_gem_lastclose(dev); 2226 2227 if (dev_priv->agp_heap) 2228 i915_mem_takedown(&(dev_priv->agp_heap)); 2229 2230 i915_dma_cleanup(dev); 2231} 2232 2233void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 2234{ 2235 drm_i915_private_t *dev_priv = dev->dev_private; 2236 i915_gem_release(dev, file_priv); 2237 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2238 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 2239} 2240 2241void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 2242{ 2243 struct drm_i915_file_private *file_priv = file->driver_priv; 2244 2245 kfree(file_priv); 2246} 2247 2248struct drm_ioctl_desc i915_ioctls[] = { 2249 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2250 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 2251 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), 2252 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), 2253 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), 2254 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), 2255 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), 2256 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2257 DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH), 2258 DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH), 2259 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2260 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 2261 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2262 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, 
i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2263 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), 2264 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 2265 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2266 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2267 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), 2268 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), 2269 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2270 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2271 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 2272 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), 2273 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2274 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2275 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), 2276 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), 2277 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), 2278 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), 2279 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), 2280 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), 2281 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), 2282 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), 2283 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), 2284 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), 2285 
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), 2286 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), 2287 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2288 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2289}; 2290 2291int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 2292 2293/** 2294 * Determine if the device really is AGP or not. 2295 * 2296 * All Intel graphics chipsets are treated as AGP, even if they are really 2297 * PCI-e. 2298 * 2299 * \param dev The device to be tested. 2300 * 2301 * \returns 2302 * A value of 1 is always retured to indictate every i9x5 is AGP. 2303 */ 2304int i915_driver_device_is_agp(struct drm_device * dev) 2305{ 2306 return 1; 2307} 2308