/* i915_dma.c revision 6fb88588555a18792a27f483887fe1f2af5f9c9b */
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
/*
 * Poll until at least @n bytes of space are free in the ring buffer.
 *
 * Returns 0 once enough space is available, or -EBUSY if the hardware made
 * no forward progress for the whole poll budget.  Any movement of the ring
 * head or of ACTHD (the active-head register) resets the loop counter, so
 * -EBUSY only fires on a genuine stall.  @caller is currently unused.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	/* pre-965 chips report the active head in a different register */
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		/* space is head - tail, minus an 8-byte guard band */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		if (master_priv->sarea_priv)
			master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		/* hardware made progress: restart the timeout window */
		if (ring->head != last_head)
			i = 0;
		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		msleep_interruptible(10);

	}

	return -EBUSY;
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Program Hardware Status Page */
	/* one page of coherent PCI memory, constrained to 32-bit addresses */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	/* point the hardware at the bus address of the page */
	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* physical (DMA) status page, allocated by i915_init_phys_hws() */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* GTT-based status page mapped via i915_set_status_page() */
	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

/*
 * Resynchronize the driver's software copy of the ring state (head/tail/
 * space) with the hardware after a context loss, and flag an empty ring in
 * the sarea perf boxes.  No-op under kernel modesetting, where the ring is
 * not exposed to userspace.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

/*
 * Tear down the DMA state: disable interrupts, unmap the ring buffer and
 * release the GFX hardware status page.  Safe to call on a partially
 * initialized device.  Always returns 0.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = NULL;
		dev_priv->ring.map.handle = NULL;
		dev_priv->ring.map.size = 0;
	}

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

/*
 * Legacy (DRI1) DMA initialization: locate the sarea, map the userspace-
 * provided ring buffer and record the buffer-swap offsets.  Rejected with
 * -EINVAL if a GEM ring buffer already exists.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (dev_priv->ring.ring_obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		/* ring size is presumably a power of two here (mask = size-1)
		 * -- not validated; TODO confirm against callers */
		dev_priv->ring.Size = init->ring_size;
		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

		dev_priv->ring.map.offset = init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;

		/* write-combining mapping of the ring */
		drm_core_ioremap_wc(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}

/*
 * Re-enable DMA after suspend: verify the ring mapping and status page are
 * still present and reprogram HWS_PGA with whichever status-page address
 * (GTT or physical) is in use.
 */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __func__);

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

/* ioctl entry point: dispatch DRI1 DMA init/cleanup/resume requests. */
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
/*
 * Return the length (in dwords) of the command @cmd if it is allowed in a
 * user command buffer, or 0 to reject it.  The opcode class lives in bits
 * 31:29; subsequent fields are decoded per class.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		/* 3D commands: fixed-length for low opcodes */
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

/* Thin wrapper around do_validate_cmd(); kept as a debug trace point. */
static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

/*
 * Copy @dwords dwords of user commands from @buffer into the ring after
 * validating each instruction.  Returns 0 on success, -EINVAL on an
 * oversized buffer, copy fault or rejected command.
 *
 * NOTE(review): the error returns between BEGIN_LP_RING() and
 * ADVANCE_LP_RING() leave the ring emit unterminated -- presumably
 * tolerated by the legacy ring macros; verify before restructuring.
 */
static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	/* ring writes are emitted in pairs; round up to an even count */
	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		/* copy the remaining (sz - 1) operand dwords unvalidated */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	/* pad to the even dword count requested above */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

/*
 * Emit a DRAWRECT (clip rectangle) command for cliprect @i of @boxes.
 * Returns -EFAULT on a copy fault, -EINVAL for a degenerate rectangle.
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect __user *boxes,
	      int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		/* 965 packs the rectangle without DR1 and the trailing zero */
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.
For now, do it in both places: 440 */ 441 442static void i915_emit_breadcrumb(struct drm_device *dev) 443{ 444 drm_i915_private_t *dev_priv = dev->dev_private; 445 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 446 RING_LOCALS; 447 448 dev_priv->counter++; 449 if (dev_priv->counter > 0x7FFFFFFFUL) 450 dev_priv->counter = 0; 451 if (master_priv->sarea_priv) 452 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 453 454 BEGIN_LP_RING(4); 455 OUT_RING(MI_STORE_DWORD_INDEX); 456 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 457 OUT_RING(dev_priv->counter); 458 OUT_RING(0); 459 ADVANCE_LP_RING(); 460} 461 462static int i915_dispatch_cmdbuffer(struct drm_device * dev, 463 drm_i915_cmdbuffer_t * cmd) 464{ 465 int nbox = cmd->num_cliprects; 466 int i = 0, count, ret; 467 468 if (cmd->sz & 0x3) { 469 DRM_ERROR("alignment"); 470 return -EINVAL; 471 } 472 473 i915_kernel_lost_context(dev); 474 475 count = nbox ? nbox : 1; 476 477 for (i = 0; i < count; i++) { 478 if (i < nbox) { 479 ret = i915_emit_box(dev, cmd->cliprects, i, 480 cmd->DR1, cmd->DR4); 481 if (ret) 482 return ret; 483 } 484 485 ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); 486 if (ret) 487 return ret; 488 } 489 490 i915_emit_breadcrumb(dev); 491 return 0; 492} 493 494static int i915_dispatch_batchbuffer(struct drm_device * dev, 495 drm_i915_batchbuffer_t * batch) 496{ 497 drm_i915_private_t *dev_priv = dev->dev_private; 498 struct drm_clip_rect __user *boxes = batch->cliprects; 499 int nbox = batch->num_cliprects; 500 int i = 0, count; 501 RING_LOCALS; 502 503 if ((batch->start | batch->used) & 0x7) { 504 DRM_ERROR("alignment"); 505 return -EINVAL; 506 } 507 508 i915_kernel_lost_context(dev); 509 510 count = nbox ? 
nbox : 1; 511 512 for (i = 0; i < count; i++) { 513 if (i < nbox) { 514 int ret = i915_emit_box(dev, boxes, i, 515 batch->DR1, batch->DR4); 516 if (ret) 517 return ret; 518 } 519 520 if (!IS_I830(dev) && !IS_845G(dev)) { 521 BEGIN_LP_RING(2); 522 if (IS_I965G(dev)) { 523 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); 524 OUT_RING(batch->start); 525 } else { 526 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); 527 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 528 } 529 ADVANCE_LP_RING(); 530 } else { 531 BEGIN_LP_RING(4); 532 OUT_RING(MI_BATCH_BUFFER); 533 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 534 OUT_RING(batch->start + batch->used - 4); 535 OUT_RING(0); 536 ADVANCE_LP_RING(); 537 } 538 } 539 540 i915_emit_breadcrumb(dev); 541 542 return 0; 543} 544 545static int i915_dispatch_flip(struct drm_device * dev) 546{ 547 drm_i915_private_t *dev_priv = dev->dev_private; 548 struct drm_i915_master_private *master_priv = 549 dev->primary->master->driver_priv; 550 RING_LOCALS; 551 552 if (!master_priv->sarea_priv) 553 return -EINVAL; 554 555 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 556 __func__, 557 dev_priv->current_page, 558 master_priv->sarea_priv->pf_current_page); 559 560 i915_kernel_lost_context(dev); 561 562 BEGIN_LP_RING(2); 563 OUT_RING(MI_FLUSH | MI_READ_FLUSH); 564 OUT_RING(0); 565 ADVANCE_LP_RING(); 566 567 BEGIN_LP_RING(6); 568 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 569 OUT_RING(0); 570 if (dev_priv->current_page == 0) { 571 OUT_RING(dev_priv->back_offset); 572 dev_priv->current_page = 1; 573 } else { 574 OUT_RING(dev_priv->front_offset); 575 dev_priv->current_page = 0; 576 } 577 OUT_RING(0); 578 ADVANCE_LP_RING(); 579 580 BEGIN_LP_RING(2); 581 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); 582 OUT_RING(0); 583 ADVANCE_LP_RING(); 584 585 master_priv->sarea_priv->last_enqueue = dev_priv->counter++; 586 587 BEGIN_LP_RING(4); 588 OUT_RING(MI_STORE_DWORD_INDEX); 589 OUT_RING(I915_BREADCRUMB_INDEX << 
MI_STORE_DWORD_INDEX_SHIFT); 590 OUT_RING(dev_priv->counter); 591 OUT_RING(0); 592 ADVANCE_LP_RING(); 593 594 master_priv->sarea_priv->pf_current_page = dev_priv->current_page; 595 return 0; 596} 597 598static int i915_quiescent(struct drm_device * dev) 599{ 600 drm_i915_private_t *dev_priv = dev->dev_private; 601 602 i915_kernel_lost_context(dev); 603 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); 604} 605 606static int i915_flush_ioctl(struct drm_device *dev, void *data, 607 struct drm_file *file_priv) 608{ 609 int ret; 610 611 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 612 613 mutex_lock(&dev->struct_mutex); 614 ret = i915_quiescent(dev); 615 mutex_unlock(&dev->struct_mutex); 616 617 return ret; 618} 619 620static int i915_batchbuffer(struct drm_device *dev, void *data, 621 struct drm_file *file_priv) 622{ 623 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 624 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 625 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 626 master_priv->sarea_priv; 627 drm_i915_batchbuffer_t *batch = data; 628 int ret; 629 630 if (!dev_priv->allow_batchbuffer) { 631 DRM_ERROR("Batchbuffer ioctl disabled\n"); 632 return -EINVAL; 633 } 634 635 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", 636 batch->start, batch->used, batch->num_cliprects); 637 638 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 639 640 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, 641 batch->num_cliprects * 642 sizeof(struct drm_clip_rect))) 643 return -EFAULT; 644 645 mutex_lock(&dev->struct_mutex); 646 ret = i915_dispatch_batchbuffer(dev, batch); 647 mutex_unlock(&dev->struct_mutex); 648 649 if (sarea_priv) 650 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 651 return ret; 652} 653 654static int i915_cmdbuffer(struct drm_device *dev, void *data, 655 struct drm_file *file_priv) 656{ 657 drm_i915_private_t *dev_priv = (drm_i915_private_t *) 
dev->dev_private; 658 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 659 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 660 master_priv->sarea_priv; 661 drm_i915_cmdbuffer_t *cmdbuf = data; 662 int ret; 663 664 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 665 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 666 667 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 668 669 if (cmdbuf->num_cliprects && 670 DRM_VERIFYAREA_READ(cmdbuf->cliprects, 671 cmdbuf->num_cliprects * 672 sizeof(struct drm_clip_rect))) { 673 DRM_ERROR("Fault accessing cliprects\n"); 674 return -EFAULT; 675 } 676 677 mutex_lock(&dev->struct_mutex); 678 ret = i915_dispatch_cmdbuffer(dev, cmdbuf); 679 mutex_unlock(&dev->struct_mutex); 680 if (ret) { 681 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 682 return ret; 683 } 684 685 if (sarea_priv) 686 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 687 return 0; 688} 689 690static int i915_flip_bufs(struct drm_device *dev, void *data, 691 struct drm_file *file_priv) 692{ 693 int ret; 694 695 DRM_DEBUG("%s\n", __func__); 696 697 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 698 699 mutex_lock(&dev->struct_mutex); 700 ret = i915_dispatch_flip(dev); 701 mutex_unlock(&dev->struct_mutex); 702 703 return ret; 704} 705 706static int i915_getparam(struct drm_device *dev, void *data, 707 struct drm_file *file_priv) 708{ 709 drm_i915_private_t *dev_priv = dev->dev_private; 710 drm_i915_getparam_t *param = data; 711 int value; 712 713 if (!dev_priv) { 714 DRM_ERROR("called with no initialization\n"); 715 return -EINVAL; 716 } 717 718 switch (param->param) { 719 case I915_PARAM_IRQ_ACTIVE: 720 value = dev->pdev->irq ? 1 : 0; 721 break; 722 case I915_PARAM_ALLOW_BATCHBUFFER: 723 value = dev_priv->allow_batchbuffer ? 
1 : 0; 724 break; 725 case I915_PARAM_LAST_DISPATCH: 726 value = READ_BREADCRUMB(dev_priv); 727 break; 728 case I915_PARAM_CHIPSET_ID: 729 value = dev->pci_device; 730 break; 731 case I915_PARAM_HAS_GEM: 732 value = dev_priv->has_gem; 733 break; 734 case I915_PARAM_NUM_FENCES_AVAIL: 735 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 736 break; 737 default: 738 DRM_DEBUG("Unknown parameter %d\n", param->param); 739 return -EINVAL; 740 } 741 742 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { 743 DRM_ERROR("DRM_COPY_TO_USER failed\n"); 744 return -EFAULT; 745 } 746 747 return 0; 748} 749 750static int i915_setparam(struct drm_device *dev, void *data, 751 struct drm_file *file_priv) 752{ 753 drm_i915_private_t *dev_priv = dev->dev_private; 754 drm_i915_setparam_t *param = data; 755 756 if (!dev_priv) { 757 DRM_ERROR("called with no initialization\n"); 758 return -EINVAL; 759 } 760 761 switch (param->param) { 762 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 763 break; 764 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 765 dev_priv->tex_lru_log_granularity = param->value; 766 break; 767 case I915_SETPARAM_ALLOW_BATCHBUFFER: 768 dev_priv->allow_batchbuffer = param->value; 769 break; 770 case I915_SETPARAM_NUM_USED_FENCES: 771 if (param->value > dev_priv->num_fence_regs || 772 param->value < 0) 773 return -EINVAL; 774 /* Userspace can use first N regs */ 775 dev_priv->fence_reg_start = param->value; 776 break; 777 default: 778 DRM_DEBUG("unknown parameter %d\n", param->param); 779 return -EINVAL; 780 } 781 782 return 0; 783} 784 785static int i915_set_status_page(struct drm_device *dev, void *data, 786 struct drm_file *file_priv) 787{ 788 drm_i915_private_t *dev_priv = dev->dev_private; 789 drm_i915_hws_addr_t *hws = data; 790 791 if (!I915_NEED_GFX_HWS(dev)) 792 return -EINVAL; 793 794 if (!dev_priv) { 795 DRM_ERROR("called with no initialization\n"); 796 return -EINVAL; 797 } 798 799 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 800 WARN(1, 
"tried to set status page when mode setting active\n"); 801 return 0; 802 } 803 804 printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); 805 806 dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); 807 808 dev_priv->hws_map.offset = dev->agp->base + hws->addr; 809 dev_priv->hws_map.size = 4*1024; 810 dev_priv->hws_map.type = 0; 811 dev_priv->hws_map.flags = 0; 812 dev_priv->hws_map.mtrr = 0; 813 814 drm_core_ioremap(&dev_priv->hws_map, dev); 815 if (dev_priv->hws_map.handle == NULL) { 816 i915_dma_cleanup(dev); 817 dev_priv->status_gfx_addr = 0; 818 DRM_ERROR("can not ioremap virtual address for" 819 " G33 hw status page\n"); 820 return -ENOMEM; 821 } 822 dev_priv->hw_status_page = dev_priv->hws_map.handle; 823 824 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 825 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 826 DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n", 827 dev_priv->status_gfx_addr); 828 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); 829 return 0; 830} 831 832/** 833 * i915_probe_agp - get AGP bootup configuration 834 * @pdev: PCI device 835 * @aperture_size: returns AGP aperture configured size 836 * @preallocated_size: returns size of BIOS preallocated AGP space 837 * 838 * Since Intel integrated graphics are UMA, the BIOS has to set aside 839 * some RAM for the framebuffer at early boot. This code figures out 840 * how much was set aside so we can use it for our own purposes. 841 */ 842static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, 843 unsigned long *preallocated_size) 844{ 845 struct pci_dev *bridge_dev; 846 u16 tmp = 0; 847 unsigned long overhead; 848 unsigned long stolen; 849 850 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); 851 if (!bridge_dev) { 852 DRM_ERROR("bridge device not found\n"); 853 return -1; 854 } 855 856 /* Get the fb aperture size and "stolen" memory amount. 
*/ 857 pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp); 858 pci_dev_put(bridge_dev); 859 860 *aperture_size = 1024 * 1024; 861 *preallocated_size = 1024 * 1024; 862 863 switch (dev->pdev->device) { 864 case PCI_DEVICE_ID_INTEL_82830_CGC: 865 case PCI_DEVICE_ID_INTEL_82845G_IG: 866 case PCI_DEVICE_ID_INTEL_82855GM_IG: 867 case PCI_DEVICE_ID_INTEL_82865_IG: 868 if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) 869 *aperture_size *= 64; 870 else 871 *aperture_size *= 128; 872 break; 873 default: 874 /* 9xx supports large sizes, just look at the length */ 875 *aperture_size = pci_resource_len(dev->pdev, 2); 876 break; 877 } 878 879 /* 880 * Some of the preallocated space is taken by the GTT 881 * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 882 */ 883 if (IS_G4X(dev)) 884 overhead = 4096; 885 else 886 overhead = (*aperture_size / 1024) + 4096; 887 888 switch (tmp & INTEL_GMCH_GMS_MASK) { 889 case INTEL_855_GMCH_GMS_DISABLED: 890 DRM_ERROR("video memory is disabled\n"); 891 return -1; 892 case INTEL_855_GMCH_GMS_STOLEN_1M: 893 stolen = 1 * 1024 * 1024; 894 break; 895 case INTEL_855_GMCH_GMS_STOLEN_4M: 896 stolen = 4 * 1024 * 1024; 897 break; 898 case INTEL_855_GMCH_GMS_STOLEN_8M: 899 stolen = 8 * 1024 * 1024; 900 break; 901 case INTEL_855_GMCH_GMS_STOLEN_16M: 902 stolen = 16 * 1024 * 1024; 903 break; 904 case INTEL_855_GMCH_GMS_STOLEN_32M: 905 stolen = 32 * 1024 * 1024; 906 break; 907 case INTEL_915G_GMCH_GMS_STOLEN_48M: 908 stolen = 48 * 1024 * 1024; 909 break; 910 case INTEL_915G_GMCH_GMS_STOLEN_64M: 911 stolen = 64 * 1024 * 1024; 912 break; 913 case INTEL_GMCH_GMS_STOLEN_128M: 914 stolen = 128 * 1024 * 1024; 915 break; 916 case INTEL_GMCH_GMS_STOLEN_256M: 917 stolen = 256 * 1024 * 1024; 918 break; 919 case INTEL_GMCH_GMS_STOLEN_96M: 920 stolen = 96 * 1024 * 1024; 921 break; 922 case INTEL_GMCH_GMS_STOLEN_160M: 923 stolen = 160 * 1024 * 1024; 924 break; 925 case INTEL_GMCH_GMS_STOLEN_224M: 926 stolen = 224 * 1024 * 1024; 927 break; 928 
case INTEL_GMCH_GMS_STOLEN_352M: 929 stolen = 352 * 1024 * 1024; 930 break; 931 default: 932 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", 933 tmp & INTEL_GMCH_GMS_MASK); 934 return -1; 935 } 936 *preallocated_size = stolen - overhead; 937 938 return 0; 939} 940 941static int i915_load_modeset_init(struct drm_device *dev) 942{ 943 struct drm_i915_private *dev_priv = dev->dev_private; 944 unsigned long agp_size, prealloc_size; 945 int fb_bar = IS_I9XX(dev) ? 2 : 0; 946 int ret = 0; 947 948 dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL); 949 if (!dev->devname) { 950 ret = -ENOMEM; 951 goto out; 952 } 953 954 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 955 0xff000000; 956 957 if (IS_MOBILE(dev) || IS_I9XX(dev)) 958 dev_priv->cursor_needs_physical = true; 959 else 960 dev_priv->cursor_needs_physical = false; 961 962 if (IS_I965G(dev) || IS_G33(dev)) 963 dev_priv->cursor_needs_physical = false; 964 965 ret = i915_probe_agp(dev, &agp_size, &prealloc_size); 966 if (ret) 967 goto kfree_devname; 968 969 /* Basic memrange allocator for stolen space (aka vram) */ 970 drm_mm_init(&dev_priv->vram, 0, prealloc_size); 971 972 /* Let GEM Manage from end of prealloc space to end of aperture */ 973 i915_gem_do_init(dev, prealloc_size, agp_size); 974 975 ret = i915_gem_init_ringbuffer(dev); 976 if (ret) 977 goto kfree_devname; 978 979 /* Allow hardware batchbuffers unless told otherwise. 980 */ 981 dev_priv->allow_batchbuffer = 1; 982 983 ret = intel_init_bios(dev); 984 if (ret) 985 DRM_INFO("failed to find VBIOS tables\n"); 986 987 ret = drm_irq_install(dev); 988 if (ret) 989 goto destroy_ringbuffer; 990 991 /* FIXME: re-add hotplug support */ 992#if 0 993 ret = drm_hotplug_init(dev); 994 if (ret) 995 goto destroy_ringbuffer; 996#endif 997 998 /* Always safe in the mode setting case. */ 999 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1000 dev->vblank_disable_allowed = 1; 1001 1002 /* 1003 * Initialize the hardware status page IRQ location. 
1004 */ 1005 1006 I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); 1007 1008 intel_modeset_init(dev); 1009 1010 drm_helper_initial_config(dev, false); 1011 1012 return 0; 1013 1014destroy_ringbuffer: 1015 i915_gem_cleanup_ringbuffer(dev); 1016kfree_devname: 1017 kfree(dev->devname); 1018out: 1019 return ret; 1020} 1021 1022int i915_master_create(struct drm_device *dev, struct drm_master *master) 1023{ 1024 struct drm_i915_master_private *master_priv; 1025 1026 master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); 1027 if (!master_priv) 1028 return -ENOMEM; 1029 1030 master->driver_priv = master_priv; 1031 return 0; 1032} 1033 1034void i915_master_destroy(struct drm_device *dev, struct drm_master *master) 1035{ 1036 struct drm_i915_master_private *master_priv = master->driver_priv; 1037 1038 if (!master_priv) 1039 return; 1040 1041 drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); 1042 1043 master->driver_priv = NULL; 1044} 1045 1046/** 1047 * i915_driver_load - setup chip and create an initial config 1048 * @dev: DRM device 1049 * @flags: startup flags 1050 * 1051 * The driver load routine has to do several things: 1052 * - drive output discovery via intel_modeset_init() 1053 * - initialize the memory manager 1054 * - allocate initial config memory 1055 * - setup the DRM framebuffer with the allocated memory 1056 */ 1057int i915_driver_load(struct drm_device *dev, unsigned long flags) 1058{ 1059 struct drm_i915_private *dev_priv = dev->dev_private; 1060 unsigned long base, size; 1061 int ret = 0, mmio_bar = IS_I9XX(dev) ? 
		  0 : 1;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Add register map (needed for suspend/resume) */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	dev_priv->regs = ioremap(base, size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto free_priv;
	}

	/* Write-combined CPU mapping of the whole GTT aperture, used by GEM
	 * for CPU access to objects through the aperture. */
	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base,
				     dev->agp->agp_info.aper_size * 1024*1024);
	/* Set up a WC MTRR for non-PAT systems. This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present. Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 dev->agp->agp_info.aper_size *
					 1024 * 1024,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		/* Non-fatal: we fall back to whatever caching the aperture
		 * mapping already has. */
		DRM_INFO("MTRR allocation failed\n. Graphics "
			 "performance may suffer.\n");
	}

#ifdef CONFIG_HIGHMEM64G
	/* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
	dev_priv->has_gem = 0;
#else
	/* enable GEM by default */
	dev_priv->has_gem = 1;
#endif

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	if (IS_GM45(dev))
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;

	i915_gem_load(dev);

	/* Init HWS: only chips that can't place the hardware status page in
	 * graphics memory get a physically-addressed page allocated here. */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0)
			goto out_rmmap;
	}

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_opregion_init(dev);

	spin_lock_init(&dev_priv->user_irq_lock);
	dev_priv->user_irq_refcount = 0;

	ret = drm_vblank_init(dev, I915_NUM_PIPE);

	if (ret) {
		/* NOTE(review): unload frees dev_priv and unmaps the regs,
		 * so return directly rather than via the labels below. */
		(void) i915_driver_unload(dev);
		return ret;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_rmmap;
		}
	}

	return 0;

out_rmmap:
	iounmap(dev_priv->regs);
free_priv:
	drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
	return ret;
}

/**
 * Tears down the driver state set up by i915_driver_load, in roughly
 * reverse order: GTT mapping/MTRR, IRQs, MSI, register mapping, opregion,
 * modeset/GEM state, then the private structure itself.
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	io_mapping_free(dev_priv->mm.gtt_mapping);
	/* gtt_mtrr < 0 means the WC MTRR was never allocated */
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_irq_uninstall(dev);
	}

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	/* regs may be NULL if load failed before/at ioremap */
	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);

	intel_opregion_free(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		i915_gem_free_all_phys_object(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);
	}

	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		 DRM_MEM_DRIVER);

	return 0;
}

/**
 * Allocates the per-file private structure used to track GEM state for
 * each client that opens the device node.
 */
int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv;

	DRM_DEBUG("\n");
	i915_file_priv = (struct
 drm_i915_file_private *)
	    drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);

	if (!i915_file_priv)
		return -ENOMEM;

	file_priv->driver_priv = i915_file_priv;

	/* No GEM requests issued by this client yet */
	i915_file_priv->mm.last_gem_seqno = 0;
	i915_file_priv->mm.last_gem_throttle_seqno = 0;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* KMS: just put the console/fb back; no legacy DMA state to tear
	 * down.  Also taken when load never got far enough to allocate
	 * dev_priv. */
	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		intelfb_restore();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

/* Release any legacy AGP-heap allocations owned by this client (UMS only). */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

/* Free the per-file private allocated in i915_driver_open. */
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
}

/* Driver ioctl table; indices must match the DRM_I915_* command numbers. */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}