i915_dma.c revision 1ae8c0a56eeb3ed358b78ccadd024d6b721f26bc
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define I915_DRV	"i915_drv"

/* Really want an OS-independent resettable timer. Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		if (ring->head != last_head)
			i = 0;
		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		msleep_interruptible(10);
	}

	return -EBUSY;
}
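/*
 * Worked example (illustrative numbers, not from the original source):
 * with a 128 KiB ring (Size = 0x20000), head = 0x1000 and tail = 0x1f000,
 * head - (tail + 8) = -0x1e008, which the wrap above turns into
 * 0x20000 - 0x1e008 = 0x1ff8 bytes of free space.  The 8 bytes of slack
 * keep the tail from ever catching up to the head exactly, which would
 * look like an empty ring to the hardware.
 */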
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
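/*
 * Hedged note: HWS_PGA takes the page-aligned bus address of a single
 * 4 KiB status page, which is why drm_pci_alloc() above asks for both
 * PAGE_SIZE size and PAGE_SIZE alignment.  Once programmed, the CPU
 * side reads status dwords straight out of the mapping, e.g. the
 * breadcrumb written by i915_emit_breadcrumb() below:
 *
 *	u32 seq = ((volatile u32 *)dev_priv->hw_status_page)
 *			[I915_BREADCRUMB_INDEX];
 *
 * READ_BREADCRUMB() in i915_drv.h is essentially this access.
 */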
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = NULL;
		dev_priv->ring.map.handle = NULL;
		dev_priv->ring.map.size = 0;
	}

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER(I915_DRV,
				"sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (dev_priv->ring.ring_obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->ring.Size = init->ring_size;
		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

		dev_priv->ring.map.offset = init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;

		drm_core_ioremap_wc(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
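/*
 * Hedged note: tail_mask = Size - 1 only works as a wrap mask because
 * the ring size is a power of two.  For example (illustrative, not from
 * the original source), a 64 KiB ring gives tail_mask = 0xffff, so the
 * ring macros can advance the tail with "tail = (tail + 4) & tail_mask"
 * and get wraparound for free.
 */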
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
				dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
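/*
 * Worked example (illustrative value, not from the original source):
 * for cmd = 0x54f00004, bits 31:29 are 0x2, so this is a 2D command and
 * do_validate_cmd() returns (cmd & 0xff) + 2 = 6.  The checker in
 * i915_emit_cmds() then skips ahead six dwords to the next instruction.
 */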
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	/* Validate the whole buffer before BEGIN_LP_RING(): returning
	 * -EINVAL after the ring has been begun would abandon it in an
	 * inconsistent state.
	 */
	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);

		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *boxes,
	      int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box = boxes[i];
	RING_LOCALS;

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
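/*
 * Hedged note: drm_clip_rect uses exclusive x2/y2, while the DRAWRECT
 * packet takes inclusive maxima, hence the "- 1" above.  For example
 * (illustrative), a full 640x480 box (0,0)-(640,480) is emitted with
 * xmax/ymax = 639/479.
 */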
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
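/*
 * Hedged note: MI_STORE_DWORD_INDEX makes the GPU write the dword that
 * follows in the ring (dev_priv->counter here) to the given index in
 * the hardware status page.  Once the command retires, userspace can
 * learn the last completed batch via I915_PARAM_LAST_DISPATCH, which
 * i915_getparam() below answers with READ_BREADCRUMB().  The counter
 * wraps at 0x7fffffff so it stays positive in the signed sarea fields.
 */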
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
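/*
 * Hedged note on the two dispatch forms above: most chips take
 * MI_BATCH_BUFFER_START plus a start address and fetch until they hit
 * the end of the batch, while 830/845 use the older MI_BATCH_BUFFER
 * packet with explicit start and end addresses (hence
 * "batch->start + batch->used - 4", the address of the last dword).
 * The NON_SECURE bit makes the hardware apply the same restrictions
 * that do_validate_cmd() mirrors in software.
 */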
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	RING_LOCALS;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
			__func__,
			dev_priv->current_page,
			master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
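/*
 * Hedged note: i915_quiescent() above asks i915_wait_ring() for
 * Size - 8 free bytes, the most a ring can ever report given the
 * "+ 8" slack in the space computation, so it only returns once the
 * ring has completely drained and the GPU is idle.
 */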
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER(I915_DRV,
			"i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		/* copy_from_user() returns the number of bytes left
		 * uncopied, not an errno, so translate failure to -EFAULT.
		 */
		if (copy_from_user(cliprects, batch->cliprects,
				   batch->num_cliprects *
				   sizeof(struct drm_clip_rect))) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER(I915_DRV,
			"i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	if (copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz)) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		if (copy_from_user(cliprects, cmdbuf->cliprects,
				   cmdbuf->num_cliprects *
				   sizeof(struct drm_clip_rect))) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}
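/*
 * Hedged userspace sketch (libdrm; illustrative, not part of this file):
 * the classic DRI1 path fills a drm_i915_batchbuffer_t and fires the
 * ioctl.  Per the checks above, start and used must be 8-byte aligned
 * and batchbuffers must not have been disabled via setparam:
 *
 *	drm_i915_batchbuffer_t bb = {
 *		.start = batch_offset,	// GTT offset, assumed name
 *		.used = batch_len,	// bytes, assumed name
 *	};
 *	drmCommandWrite(fd, DRM_I915_BATCHBUFFER, &bb, sizeof(bb));
 */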
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	default:
		DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);

	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
				dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
				dev_priv->hw_status_page);
	return 0;
}
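/*
 * Hedged note: on chips that need the status page in graphics memory
 * (I915_NEED_GFX_HWS, e.g. G33), hws->addr is an offset into the GTT
 * aperture rather than a system-memory bus address.  The mask above
 * keeps a 4 KiB-aligned offset for HWS_PGA, while the CPU mapping goes
 * through the aperture at dev->agp->base + hws->addr.
 */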
/**
 * i915_probe_agp - get AGP bootup configuration
 * @pdev: PCI device
 * @aperture_size: returns AGP aperture configured size
 * @preallocated_size: returns size of BIOS preallocated AGP space
 *
 * Since Intel integrated graphics are UMA, the BIOS has to set aside
 * some RAM for the framebuffer at early boot.  This code figures out
 * how much was set aside so we can use it for our own purposes.
 */
static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
			  unsigned long *preallocated_size)
{
	struct pci_dev *bridge_dev;
	u16 tmp = 0;
	unsigned long overhead;
	unsigned long stolen;

	bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
	if (!bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}

	/* Get the fb aperture size and "stolen" memory amount. */
	pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
	pci_dev_put(bridge_dev);

	*aperture_size = 1024 * 1024;
	*preallocated_size = 1024 * 1024;

	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_INTEL_82830_CGC:
	case PCI_DEVICE_ID_INTEL_82845G_IG:
	case PCI_DEVICE_ID_INTEL_82855GM_IG:
	case PCI_DEVICE_ID_INTEL_82865_IG:
		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
			*aperture_size *= 64;
		else
			*aperture_size *= 128;
		break;
	default:
		/* 9xx supports large sizes, just look at the length */
		*aperture_size = pci_resource_len(dev->pdev, 2);
		break;
	}

	/*
	 * Some of the preallocated space is taken by the GTT
	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
	 */
	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
		overhead = 4096;
	else
		overhead = (*aperture_size / 1024) + 4096;

	switch (tmp & INTEL_GMCH_GMS_MASK) {
	case INTEL_855_GMCH_GMS_DISABLED:
		DRM_ERROR("video memory is disabled\n");
		return -1;
	case INTEL_855_GMCH_GMS_STOLEN_1M:
		stolen = 1 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_4M:
		stolen = 4 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_8M:
		stolen = 8 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_16M:
		stolen = 16 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_32M:
		stolen = 32 * 1024 * 1024;
		break;
	case INTEL_915G_GMCH_GMS_STOLEN_48M:
		stolen = 48 * 1024 * 1024;
		break;
	case INTEL_915G_GMCH_GMS_STOLEN_64M:
		stolen = 64 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_128M:
		stolen = 128 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_256M:
		stolen = 256 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_96M:
		stolen = 96 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_160M:
		stolen = 160 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_224M:
		stolen = 224 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_352M:
		stolen = 352 * 1024 * 1024;
		break;
	default:
		DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
			  tmp & INTEL_GMCH_GMS_MASK);
		return -1;
	}
	*preallocated_size = stolen - overhead;

	return 0;
}
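/*
 * Worked example (illustrative numbers): on a 9xx part with a 256 MB
 * aperture and 64 MB of BIOS-stolen memory, overhead is
 * 256 MB / 1024 + 4096 = 256 KB of GTT plus the 4 KB popup, so
 * i915_probe_agp() reports 64 MB - 260 KB = 66,842,624 usable bytes.
 */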
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long agp_size, prealloc_size;
	int fb_bar = IS_I9XX(dev) ? 2 : 0;
	int ret = 0;

	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
		0xff000000;

	if (IS_MOBILE(dev) || IS_I9XX(dev))
		dev_priv->cursor_needs_physical = true;
	else
		dev_priv->cursor_needs_physical = false;

	if (IS_I965G(dev) || IS_G33(dev))
		dev_priv->cursor_needs_physical = false;

	ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
	if (ret)
		goto out;

	/* Basic memrange allocator for stolen space (aka vram) */
	drm_mm_init(&dev_priv->vram, 0, prealloc_size);

	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);

	ret = i915_gem_init_ringbuffer(dev);
	if (ret)
		goto out;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	ret = intel_init_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	ret = drm_irq_install(dev);
	if (ret)
		goto destroy_ringbuffer;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	/*
	 * Initialize the hardware status page IRQ location.
	 */

	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

	intel_modeset_init(dev);

	drm_helper_initial_config(dev);

	return 0;

destroy_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
out:
	return ret;
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

static void i915_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	if (!IS_IGD(dev))
		return;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	resource_size_t base, size;
	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Add register map (needed for suspend/resume) */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	dev_priv->regs = ioremap(base, size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto free_priv;
	}

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base,
				     dev->agp->agp_info.aper_size * 1024*1024);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 dev->agp->agp_info.aper_size *
					 1024 * 1024,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_IGDNG(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0)
			goto out_iomapfree;
	}

	i915_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->user_irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	dev_priv->user_irq_refcount = 0;

	ret = drm_vblank_init(dev, I915_NUM_PIPE);

	if (ret) {
		(void) i915_driver_unload(dev);
		return ret;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_rmmap;
		}
	}

	/* Must be done after probing outputs */
	/* FIXME: verify on IGDNG */
	if (!IS_IGDNG(dev))
		intel_opregion_init(dev, 0);

	return 0;

out_iomapfree:
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	iounmap(dev_priv->regs);
free_priv:
	kfree(dev_priv);
	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	io_mapping_free(dev_priv->mm.gtt_mapping);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_irq_uninstall(dev);
	}

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);

	if (!IS_IGDNG(dev))
		intel_opregion_free(dev, 0);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		i915_gem_free_all_phys_object(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);
	}

	kfree(dev->dev_private);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv;

	DRM_DEBUG_DRIVER(I915_DRV, "\n");
	i915_file_priv = (struct drm_i915_file_private *)
	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);

	if (!i915_file_priv)
		return -ENOMEM;

	file_priv->driver_priv = i915_file_priv;

	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		intelfb_restore();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	kfree(i915_file_priv);
}

struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}