i915_dma.c revision f71d4af4cd475aced6d9ec9730b03885ac80b833
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- 2 */ 3/* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>

/* Point the HWS_PGA register at the physical address of the hardware
 * status page.  NOTE(review): on gen4+ the high bits of the bus address
 * appear to be folded into the low nibble of the register value
 * ((busaddr >> 28) & 0xf0) -- confirm encoding against the bspec.
 */
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr =
		(void __force __iomem *)dev_priv->status_page_dmah->vaddr;

	/* Start from a clean page before telling the GPU about it. */
	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);

	i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Physical (DMA-allocated) status page, if any. */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* GTT-based status page set up via the HWS_ADDR ioctl, if any. */
	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

/* Resynchronize the driver's cached view of the ring (head/tail/space)
 * with the hardware registers.  Used on the legacy (UMS/DRI1) paths where
 * userspace may have driven the ring behind the kernel's back.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

/* Tear down the legacy DMA state: IRQs, all rings and the status page. */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

/* Legacy DRI1 initialisation: locate the SAREA supplied by the X server
 * and, if requested, take over the userspace-provided ring buffer.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			/* GEM already owns the ring; a DRI1 client must not
			 * re-initialize it. */
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}

/* Re-validate the ring mapping and status page after a suspend/resume
 * cycle on the legacy path.
 */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	/* A non-zero gfx_addr means a GTT-based status page was configured;
	 * otherwise fall back to the physical-address register write. */
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

/* DRM_IOCTL_I915_INIT entry point: dispatch to init/cleanup/resume. */
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	/* Top three bits select the command type. */
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

/* Validate a stream of userspace commands with validate_cmd() and emit it
 * onto the low-priority ring.  Rejects buffers containing disallowed or
 * truncated commands.
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	/* Ring emission is padded to an even number of dwords. */
	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

/* Emit a DRAWRECT (cliprect) command restricting subsequent rendering. */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	/* Counter wraps at 31 bits to stay positive in the sarea. */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

/* Dispatch a validated command buffer once per cliprect (or once when
 * there are no cliprects).
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ?
		nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

/* Dispatch a DRI1 client batchbuffer, once per cliprect (or once when
 * there are none).  The batch is started with MI_BATCH_BUFFER_START where
 * supported, or the older MI_BATCH_BUFFER on i830/845.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}


	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

/* Emit a page flip between the front and back buffers on the legacy
 * (DRI1) path, toggling dev_priv->current_page and updating the sarea.
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

/* Wait for the low-priority ring to go idle. */
static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return intel_wait_ring_idle(ring);
}

/* DRM_IOCTL_I915_FLUSH: drain the ring under struct_mutex. */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* DRM_IOCTL_I915_BATCHBUFFER: copy cliprects from userspace and dispatch
 * a client-supplied batchbuffer (legacy, non-GEM path).
 */
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		/* kcalloc checks the count*size multiplication for overflow. */
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

/* DRM_IOCTL_I915_CMDBUFFER: copy a command buffer plus cliprects from
 * userspace, then validate and dispatch it.
 */
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	/* NOTE(review): cmdbuf->sz is a user-controlled int and is not
	 * range-checked here before kmalloc -- confirm the bounds enforced
	 * by the caller/ioctl layer. */
	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

/* DRM_IOCTL_I915_FLIP: page-flip on the legacy path. */
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* DRM_IOCTL_I915_GETPARAM: report driver and hardware capabilities to
 * userspace.
 */
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ?
			1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	case I915_PARAM_HAS_BLT:
		value = HAS_BLT(dev);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

/* DRM_IOCTL_I915_SETPARAM: let userspace tweak driver behaviour. */
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		/* Obsolete knob: accepted but ignored. */
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}

/* DRM_IOCTL_I915_HWS_ADDR: configure a status page that lives in GTT
 * memory rather than at a physical address (chips with I915_NEED_GFX_HWS).
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr =
		(void __force __iomem *)dev_priv->hws_map.handle;
	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

/* Cache a reference to the host bridge (device 0:0.0), which holds the
 * MCHBAR and stolen-memory configuration registers.
 */
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev =
		pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Host-bridge config-space offsets for the MCH BAR. */
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	/* The MCHBAR is 64-bit on gen4+; the high dword reads as zero on
	 * older parts. */
	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	/* i915G/GM gate MCHBAR via DEVEN; other parts use bit 0 of the
	 * MCHBAR register itself. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

/* Undo intel_setup_mchbar(): disable the BAR if we enabled it and release
 * any resource we allocated for it.
 */
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ?
			 MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* GTT PTE layout for the legacy aperture. */
#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)

/**
 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
 *                       a physical one
 * @dev: drm device
 * @offset: address to translate
 *
 * Some chip functions require allocations from stolen space and need the
 * physical address of the memory in question.
 */
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev_priv->bridge_dev;
	u32 base;

#if 0
	/* On the machines I have tested the Graphics Base of Stolen Memory
	 * is unreliable, so compute the base by subtracting the stolen memory
	 * from the Top of Low Usable DRAM which is where the BIOS places
	 * the graphics stolen memory.
	 */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		/* top 32bits are reserved = 0 */
		pci_read_config_dword(pdev, 0xA4, &base);
	} else {
		/* XXX presume 8xx is the same as i915 */
		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
	}
#else
	/* Read Top of Low Usable DRAM from the bridge and subtract the
	 * stolen size to find where the BIOS placed stolen memory. */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		u16 val;
		pci_read_config_word(pdev, 0xb0, &val);
		base = val >> 4 << 20;
	} else {
		u8 val;
		pci_read_config_byte(pdev, 0x9c, &val);
		base = val >> 3 << 27;
	}
	base -= dev_priv->mm.gtt->stolen_size;
#endif

	return base + offset;
}

/* Tell the user why FBC was disabled and how they might fix it. */
static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}

/* Carve the framebuffer-compression buffer (and, pre-GM45, the line-length
 * buffer) out of stolen memory and program the FBC base registers.
 */
static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
	if (compressed_fb)
		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb)
		goto err;

	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
	if (!cfb_base)
		goto err_fb;

	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
						    4096, 4096, 0);
		if (compressed_llb)
			compressed_llb = drm_mm_get_block(compressed_llb,
							  4096, 4096);
		if (!compressed_llb)
			goto err_fb;

		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
		if (!ll_base)
			goto err_llb;
	}

	dev_priv->cfb_size = size;

	intel_disable_fbc(dev);
	dev_priv->compressed_fb = compressed_fb;
	if
(HAS_PCH_SPLIT(dev)) 1105 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 1106 else if (IS_GM45(dev)) { 1107 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1108 } else { 1109 I915_WRITE(FBC_CFB_BASE, cfb_base); 1110 I915_WRITE(FBC_LL_BASE, ll_base); 1111 dev_priv->compressed_llb = compressed_llb; 1112 } 1113 1114 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", 1115 cfb_base, ll_base, size >> 20); 1116 return; 1117 1118err_llb: 1119 drm_mm_put_block(compressed_llb); 1120err_fb: 1121 drm_mm_put_block(compressed_fb); 1122err: 1123 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1124 i915_warn_stolen(dev); 1125} 1126 1127static void i915_cleanup_compression(struct drm_device *dev) 1128{ 1129 struct drm_i915_private *dev_priv = dev->dev_private; 1130 1131 drm_mm_put_block(dev_priv->compressed_fb); 1132 if (dev_priv->compressed_llb) 1133 drm_mm_put_block(dev_priv->compressed_llb); 1134} 1135 1136/* true = enable decode, false = disable decoder */ 1137static unsigned int i915_vga_set_decode(void *cookie, bool state) 1138{ 1139 struct drm_device *dev = cookie; 1140 1141 intel_modeset_vga_set_state(dev, state); 1142 if (state) 1143 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 1144 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1145 else 1146 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1147} 1148 1149static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 1150{ 1151 struct drm_device *dev = pci_get_drvdata(pdev); 1152 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 1153 if (state == VGA_SWITCHEROO_ON) { 1154 printk(KERN_INFO "i915: switched on\n"); 1155 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1156 /* i915 resume handler doesn't set to D0 */ 1157 pci_set_power_state(dev->pdev, PCI_D0); 1158 i915_resume(dev); 1159 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1160 } else { 1161 printk(KERN_ERR "i915: switched off\n"); 1162 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1163 i915_suspend(dev, 
pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/* vga_switcheroo: we may only switch away while no DRM clients are open. */
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

/*
 * Set up the GEM memory managers (stolen-space allocator plus GTT
 * aperture), start the rings, and optionally reserve stolen space for
 * framebuffer compression.
 */
static int i915_load_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long prealloc_size, gtt_size, mappable_size;
	int ret;

	prealloc_size = dev_priv->mm.gtt->stolen_size;
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Leave 1M for line length buffer & misc. */

		/* Try to get a 32M buffer... */
		if (prealloc_size > (36*1024*1024))
			cfb_size = 32*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise. */
	dev_priv->allow_batchbuffer = 1;
	return 0;
}

/*
 * KMS startup: parse the VBT, register for VGA arbitration and
 * switcheroo, bring up modesetting, GEM, interrupts and fbdev.
 * Each failure unwinds everything registered before it.
 */
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     NULL,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = i915_load_gem_init(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	/* Always safe in the mode setting case.
 */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

/* Allocate per-master bookkeeping; freed in i915_master_destroy(). */
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

/* Free the per-master bookkeeping allocated in i915_master_create(). */
void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

/* Decode CLKCFG into cached FSB/memory frequencies (Pineview). */
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

/* Decode the DDR and CSI PLL registers into mem/fsb frequencies (Ironlake). */
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	/* r_t keys the cparams[] lookup in i915_chipset_val(). */
	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	/* c_m also keys the cparams[] lookup below. */
	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}

/* Empirical chipset power coefficients, selected by (i == c_m, t == r_t). */
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066,
276, 27605 },
	{ 0, 800, 231, 23784 },
};

/*
 * Estimate chipset power from the DMI/DDR/CSI energy counters accumulated
 * since the previous call, scaled by the empirically derived (m, c)
 * coefficients in cparams[].  Called under mchdev_lock by the IPS entry
 * points below.
 */
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	/* NOTE(review): ~0UL is only 32 bits on 32-bit kernels while
	 * total_count is a u64, and diff1 can be 0 if called twice within
	 * one jiffy (div_u64 by zero below) -- worth confirming/fixing. */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	/* Select the coefficients for the current FSB/memory configuration. */
	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	return ret;
}

/* Linear readout of the memory controller thermal sensor: m*TR1/127 - b. */
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

/*
 * Map a 7-bit PXVID voltage ID to an extended VID, using the mobile (vm)
 * or desktop (vd) column of the table (units per the original comment:
 * ".1 mil").  The table has exactly 128 entries, matching the & 0x7f
 * masking done by the caller.
 */
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		{ 0, 0, }, { 375, 0, }, { 500, 0, }, { 625, 0, },
		{ 750, 0, }, { 875, 0, }, { 1000, 0, }, { 1125, 0, },
		{ 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, },
		{ 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, },
		{ 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, },
		{ 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, },
		{ 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, },
		{ 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, },
		{ 4250, 3125, }, { 4375, 3250, }, { 4500, 3375, }, { 4625, 3500, },
		{ 4750, 3625, }, { 4875, 3750, }, { 5000, 3875, }, { 5125, 4000, },
		{ 5250, 4125, }, { 5375, 4250, }, { 5500, 4375, }, { 5625, 4500, },
		{ 5750, 4625, }, { 5875, 4750, }, { 6000, 4875, }, { 6125, 5000, },
		{ 6250, 5125, }, { 6375, 5250, }, { 6500, 5375, }, { 6625, 5500, },
		{ 6750, 5625, }, { 6875, 5750, }, { 7000, 5875, }, { 7125, 6000, },
		{ 7250, 6125, }, { 7375, 6250, }, { 7500, 6375, }, { 7625, 6500, },
		{ 7750, 6625, }, { 7875, 6750, }, { 8000, 6875, }, { 8125, 7000, },
		{ 8250, 7125, }, { 8375, 7250, }, { 8500, 7375, }, { 8625, 7500, },
		{ 8750, 7625, }, { 8875, 7750, }, { 9000, 7875, }, { 9125, 8000, },
		{ 9250, 8125, }, { 9375, 8250, }, { 9500, 8375, }, { 9625, 8500, },
		{ 9750, 8625, }, { 9875, 8750, }, { 10000, 8875, }, { 10125, 9000, },
		{ 10250, 9125, }, { 10375, 9250, }, { 10500, 9375, }, { 10625, 9500, },
		{ 10750, 9625, }, { 10875, 9750, }, { 11000, 9875, }, { 11125, 10000, },
		{ 11250, 10125, }, { 11375, 10250, }, { 11500, 10375, }, { 11625, 10500, },
		{ 11750, 10625, }, { 11875, 10750, }, { 12000, 10875, }, { 12125, 11000, },
		{ 12250, 11125, }, { 12375, 11250, }, { 12500, 11375, }, { 12625, 11500, },
		{ 12750, 11625, }, { 12875, 11750, }, { 13000, 11875, }, { 13125, 12000, },
		{ 13250, 12125, }, { 13375, 12250, }, { 13500, 12375, }, { 13625, 12500, },
		{ 13750, 12625, }, { 13875, 12750, }, { 14000, 12875, }, { 14125, 13000, },
		{ 14250, 13125, }, { 14375, 13250, }, { 14500, 13375, }, { 14625, 13500, },
		{ 14750, 13625, }, { 14875, 13750, }, { 15000, 13875, }, { 15125, 14000, },
		{ 15250, 14125, }, { 15375, 14250, }, { 15500, 14375, }, { 15625, 14500, },
		{ 15750, 14625, }, { 15875, 14750, }, { 16000, 14875, }, { 16125, 15000, },
	};
	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}

/*
 * Fold the GFXEC energy counter into dev_priv->gfx_power, normalized over
 * the raw-monotonic wall time elapsed since the previous sample.  A second
 * call within the same millisecond is a no-op.
 */
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	/* NOTE(review): same ~0UL wraparound caveat as i915_chipset_val(). */
	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants...
 */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}

/*
 * Estimate graphics power: combine the table-derived voltage for the
 * current P-state with an empirical temperature correction, then add the
 * rolling counter-based estimate maintained by i915_update_gfx_val().
 */
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	/* Read PXVFREQ for the current delay; the VID is in bits 30:24. */
	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}

/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 * Returns 0 if i915 has not registered itself (i915_mch_dev unset).
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = i915_chipset_val(dev_priv);
	graphics_val = i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 * Returns false if the i915 device is not registered.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	/* Decrementing the delay raises the frequency cap, bounded by fmax. */
	if (dev_priv->max_delay > dev_priv->fmax)
		dev_priv->max_delay--;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.  Returns false if the i915 device is not registered.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	/* Incrementing the delay lowers the cap, bounded by min_delay. */
	if (dev_priv->max_delay < dev_priv->min_delay)
		dev_priv->max_delay++;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 * Returns false when the i915 device is not registered.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = false;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	ret = dev_priv->busy;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.  Returns false if the device is not
 * registered or the DRPS write fails.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
1869 */ 1870static void 1871ips_ping_for_i915_load(void) 1872{ 1873 void (*link)(void); 1874 1875 link = symbol_get(ips_link_to_i915_driver); 1876 if (link) { 1877 link(); 1878 symbol_put(ips_link_to_i915_driver); 1879 } 1880} 1881 1882/** 1883 * i915_driver_load - setup chip and create an initial config 1884 * @dev: DRM device 1885 * @flags: startup flags 1886 * 1887 * The driver load routine has to do several things: 1888 * - drive output discovery via intel_modeset_init() 1889 * - initialize the memory manager 1890 * - allocate initial config memory 1891 * - setup the DRM framebuffer with the allocated memory 1892 */ 1893int i915_driver_load(struct drm_device *dev, unsigned long flags) 1894{ 1895 struct drm_i915_private *dev_priv; 1896 int ret = 0, mmio_bar; 1897 uint32_t agp_size; 1898 1899 /* i915 has 4 more counters */ 1900 dev->counters += 4; 1901 dev->types[6] = _DRM_STAT_IRQ; 1902 dev->types[7] = _DRM_STAT_PRIMARY; 1903 dev->types[8] = _DRM_STAT_SECONDARY; 1904 dev->types[9] = _DRM_STAT_DMA; 1905 1906 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); 1907 if (dev_priv == NULL) 1908 return -ENOMEM; 1909 1910 dev->dev_private = (void *)dev_priv; 1911 dev_priv->dev = dev; 1912 dev_priv->info = (struct intel_device_info *) flags; 1913 1914 if (i915_get_bridge_dev(dev)) { 1915 ret = -EIO; 1916 goto free_priv; 1917 } 1918 1919 /* overlay on gen2 is broken and can't address above 1G */ 1920 if (IS_GEN2(dev)) 1921 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1922 1923 /* 965GM sometimes incorrectly writes to hardware status page (HWS) 1924 * using 32bit addressing, overwriting memory if HWS is located 1925 * above 4GB. 1926 * 1927 * The documentation also mentions an issue with undefined 1928 * behaviour if any general state is accessed within a page above 4GB, 1929 * which also needs to be handled carefully. 
1930 */ 1931 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1932 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1933 1934 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1935 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); 1936 if (!dev_priv->regs) { 1937 DRM_ERROR("failed to map registers\n"); 1938 ret = -EIO; 1939 goto put_bridge; 1940 } 1941 1942 dev_priv->mm.gtt = intel_gtt_get(); 1943 if (!dev_priv->mm.gtt) { 1944 DRM_ERROR("Failed to initialize GTT\n"); 1945 ret = -ENODEV; 1946 goto out_iomapfree; 1947 } 1948 1949 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1950 1951 dev_priv->mm.gtt_mapping = 1952 io_mapping_create_wc(dev->agp->base, agp_size); 1953 if (dev_priv->mm.gtt_mapping == NULL) { 1954 ret = -EIO; 1955 goto out_rmmap; 1956 } 1957 1958 /* Set up a WC MTRR for non-PAT systems. This is more common than 1959 * one would think, because the kernel disables PAT on first 1960 * generation Core chips because WC PAT gets overridden by a UC 1961 * MTRR if present. Even if a UC MTRR isn't present. 1962 */ 1963 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, 1964 agp_size, 1965 MTRR_TYPE_WRCOMB, 1); 1966 if (dev_priv->mm.gtt_mtrr < 0) { 1967 DRM_INFO("MTRR allocation failed. Graphics " 1968 "performance may suffer.\n"); 1969 } 1970 1971 /* The i915 workqueue is primarily used for batched retirement of 1972 * requests (and thus managing bo) once the task has been completed 1973 * by the GPU. i915_gem_retire_requests() is called directly when we 1974 * need high-priority retirement, such as waiting for an explicit 1975 * bo. 1976 * 1977 * It is also used for periodic low-priority events, such as 1978 * idle-timers and recording error state. 1979 * 1980 * All tasks on the workqueue are expected to acquire the dev mutex 1981 * so there is no point in running more than one instance of the 1982 * workqueue at any time: max_active = 1 and NON_REENTRANT. 
1983 */ 1984 dev_priv->wq = alloc_workqueue("i915", 1985 WQ_UNBOUND | WQ_NON_REENTRANT, 1986 1); 1987 if (dev_priv->wq == NULL) { 1988 DRM_ERROR("Failed to create our workqueue.\n"); 1989 ret = -ENOMEM; 1990 goto out_iomapfree; 1991 } 1992 1993 /* enable GEM by default */ 1994 dev_priv->has_gem = 1; 1995 1996 intel_irq_init(dev); 1997 1998 /* Try to make sure MCHBAR is enabled before poking at it */ 1999 intel_setup_mchbar(dev); 2000 intel_setup_gmbus(dev); 2001 intel_opregion_setup(dev); 2002 2003 /* Make sure the bios did its job and set up vital registers */ 2004 intel_setup_bios(dev); 2005 2006 i915_gem_load(dev); 2007 2008 /* Init HWS */ 2009 if (!I915_NEED_GFX_HWS(dev)) { 2010 ret = i915_init_phys_hws(dev); 2011 if (ret) 2012 goto out_gem_unload; 2013 } 2014 2015 if (IS_PINEVIEW(dev)) 2016 i915_pineview_get_mem_freq(dev); 2017 else if (IS_GEN5(dev)) 2018 i915_ironlake_get_mem_freq(dev); 2019 2020 /* On the 945G/GM, the chipset reports the MSI capability on the 2021 * integrated graphics even though the support isn't actually there 2022 * according to the published specs. It doesn't appear to function 2023 * correctly in testing on 945G. 2024 * This may be a side effect of MSI having been made available for PEG 2025 * and the registers being closely associated. 2026 * 2027 * According to chipset errata, on the 965GM, MSI interrupts may 2028 * be lost or delayed, but we use them anyways to avoid 2029 * stuck interrupts on some machines. 
2030 */ 2031 if (!IS_I945G(dev) && !IS_I945GM(dev)) 2032 pci_enable_msi(dev->pdev); 2033 2034 spin_lock_init(&dev_priv->irq_lock); 2035 spin_lock_init(&dev_priv->error_lock); 2036 spin_lock_init(&dev_priv->rps_lock); 2037 2038 if (IS_MOBILE(dev) || !IS_GEN2(dev)) 2039 dev_priv->num_pipe = 2; 2040 else 2041 dev_priv->num_pipe = 1; 2042 2043 ret = drm_vblank_init(dev, dev_priv->num_pipe); 2044 if (ret) 2045 goto out_gem_unload; 2046 2047 /* Start out suspended */ 2048 dev_priv->mm.suspended = 1; 2049 2050 intel_detect_pch(dev); 2051 2052 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2053 ret = i915_load_modeset_init(dev); 2054 if (ret < 0) { 2055 DRM_ERROR("failed to init modeset\n"); 2056 goto out_gem_unload; 2057 } 2058 } 2059 2060 /* Must be done after probing outputs */ 2061 intel_opregion_init(dev); 2062 acpi_video_register(); 2063 2064 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2065 (unsigned long) dev); 2066 2067 spin_lock(&mchdev_lock); 2068 i915_mch_dev = dev_priv; 2069 dev_priv->mchdev_lock = &mchdev_lock; 2070 spin_unlock(&mchdev_lock); 2071 2072 ips_ping_for_i915_load(); 2073 2074 return 0; 2075 2076out_gem_unload: 2077 if (dev->pdev->msi_enabled) 2078 pci_disable_msi(dev->pdev); 2079 2080 intel_teardown_gmbus(dev); 2081 intel_teardown_mchbar(dev); 2082 destroy_workqueue(dev_priv->wq); 2083out_iomapfree: 2084 io_mapping_free(dev_priv->mm.gtt_mapping); 2085out_rmmap: 2086 pci_iounmap(dev->pdev, dev_priv->regs); 2087put_bridge: 2088 pci_dev_put(dev_priv->bridge_dev); 2089free_priv: 2090 kfree(dev_priv); 2091 return ret; 2092} 2093 2094int i915_driver_unload(struct drm_device *dev) 2095{ 2096 struct drm_i915_private *dev_priv = dev->dev_private; 2097 int ret; 2098 2099 spin_lock(&mchdev_lock); 2100 i915_mch_dev = NULL; 2101 spin_unlock(&mchdev_lock); 2102 2103 if (dev_priv->mm.inactive_shrinker.shrink) 2104 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 2105 2106 mutex_lock(&dev->struct_mutex); 2107 ret = i915_gpu_idle(dev); 
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			kfree(dev_priv->child_dev);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->hangcheck_timer);
	cancel_work_sync(&dev_priv->error_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}

/* Allocate per-file private state; freed in i915_driver_postclose(). */
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* KMS: restore the console mode and let switcheroo complete any
	 * delayed switch; the legacy teardown below does not apply. */
	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

/* Per-file teardown: release GEM requests and, in the legacy (non-KMS)
 * case, any blocks the file still holds on the AGP heap. */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

/* Free the per-file private allocated in i915_driver_open(). */
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

/* Ioctl dispatch table; order must match the I915_* ioctl numbers. */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}