intel_context.c revision 1b4374d364f877d1b7d01e1231adeee2e0f63a4d
1/************************************************************************** 2 * 3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 **************************************************************************/


#include "main/glheader.h"
#include "main/context.h"
#include "main/extensions.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/imports.h"
#include "main/points.h"
#include "main/renderbuffer.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "intel_chipset.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_batchbuffer.h"
#include "intel_clear.h"
#include "intel_extensions.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_bufmgr.h"
#include "intel_screen.h"
#include "intel_mipmap_tree.h"

#include "utils.h"
#include "../glsl/ralloc.h"

/* Runtime debug-flag bitmask, normally parsed from the INTEL_DEBUG
 * environment variable in intelInitContext() (see debug_control[]).
 */
#ifndef INTEL_DEBUG
int INTEL_DEBUG = (0);
#endif


/**
 * Driver hook for glGetString().
 *
 * Returns the GL_VENDOR string, or a GL_RENDERER string derived from the
 * screen's PCI device ID; NULL for any other \p name so core Mesa can
 * supply its own answer.  The renderer string is formatted into a static
 * buffer by driGetRendererString(), so the returned pointer stays valid.
 */
static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
   const struct intel_context *const intel = intel_context(ctx);
   const char *chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) "Intel Open Source Technology Center";
      break;

   case GL_RENDERER:
      switch (intel->intelScreen->deviceID) {
      case PCI_CHIP_845_G:
         chipset = "Intel(R) 845G";
         break;
      case PCI_CHIP_I830_M:
         chipset = "Intel(R) 830M";
         break;
      case PCI_CHIP_I855_GM:
         chipset = "Intel(R) 852GM/855GM";
         break;
      case PCI_CHIP_I865_G:
         chipset = "Intel(R) 865G";
         break;
      case PCI_CHIP_I915_G:
         chipset = "Intel(R) 915G";
         break;
      case PCI_CHIP_E7221_G:
         chipset = "Intel (R) E7221G (i915)";
         break;
      case PCI_CHIP_I915_GM:
         chipset = "Intel(R) 915GM";
         break;
      case PCI_CHIP_I945_G:
         chipset = "Intel(R) 945G";
         break;
      case PCI_CHIP_I945_GM:
         chipset = "Intel(R) 945GM";
         break;
      case PCI_CHIP_I945_GME:
         chipset = "Intel(R) 945GME";
         break;
      case PCI_CHIP_G33_G:
         chipset = "Intel(R) G33";
         break;
      case PCI_CHIP_Q35_G:
         chipset = "Intel(R) Q35";
         break;
      case PCI_CHIP_Q33_G:
         chipset = "Intel(R) Q33";
         break;
      case PCI_CHIP_IGD_GM:
      case PCI_CHIP_IGD_G:
         chipset = "Intel(R) IGD";
         break;
      case PCI_CHIP_I965_Q:
         chipset = "Intel(R) 965Q";
         break;
      case PCI_CHIP_I965_G:
      case PCI_CHIP_I965_G_1:
         chipset = "Intel(R) 965G";
         break;
      case PCI_CHIP_I946_GZ:
         chipset = "Intel(R) 946GZ";
         break;
      case PCI_CHIP_I965_GM:
         chipset = "Intel(R) 965GM";
         break;
      case PCI_CHIP_I965_GME:
         chipset = "Intel(R) 965GME/GLE";
         break;
      case PCI_CHIP_GM45_GM:
         chipset = "Mobile Intel® GM45 Express Chipset";
         break;
      case PCI_CHIP_IGD_E_G:
         chipset = "Intel(R) Integrated Graphics Device";
         break;
      case PCI_CHIP_G45_G:
         chipset = "Intel(R) G45/G43";
         break;
      case PCI_CHIP_Q45_G:
         chipset = "Intel(R) Q45/Q43";
         break;
      case PCI_CHIP_G41_G:
         chipset = "Intel(R) G41";
         break;
      case PCI_CHIP_B43_G:
      case PCI_CHIP_B43_G1:
         chipset = "Intel(R) B43";
         break;
      case PCI_CHIP_ILD_G:
         chipset = "Intel(R) Ironlake Desktop";
         break;
      case PCI_CHIP_ILM_G:
         chipset = "Intel(R) Ironlake Mobile";
         break;
      case PCI_CHIP_SANDYBRIDGE_GT1:
      case PCI_CHIP_SANDYBRIDGE_GT2:
      case PCI_CHIP_SANDYBRIDGE_GT2_PLUS:
         chipset = "Intel(R) Sandybridge Desktop";
         break;
      case PCI_CHIP_SANDYBRIDGE_M_GT1:
      case PCI_CHIP_SANDYBRIDGE_M_GT2:
      case PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS:
         chipset = "Intel(R) Sandybridge Mobile";
         break;
      case PCI_CHIP_SANDYBRIDGE_S:
         chipset = "Intel(R) Sandybridge Server";
         break;
      case PCI_CHIP_IVYBRIDGE_GT1:
      case PCI_CHIP_IVYBRIDGE_GT2:
         chipset = "Intel(R) Ivybridge Desktop";
         break;
      case PCI_CHIP_IVYBRIDGE_M_GT1:
      case PCI_CHIP_IVYBRIDGE_M_GT2:
         chipset = "Intel(R) Ivybridge Mobile";
         break;
      case PCI_CHIP_IVYBRIDGE_S_GT1:
      case PCI_CHIP_IVYBRIDGE_S_GT2:
         chipset = "Intel(R) Ivybridge Server";
         break;
      case PCI_CHIP_HASWELL_GT1:
      case PCI_CHIP_HASWELL_GT2:
         chipset = "Intel(R) Haswell Desktop";
         break;
      case PCI_CHIP_HASWELL_M_GT1:
      case PCI_CHIP_HASWELL_M_GT2:
      case PCI_CHIP_HASWELL_M_ULT_GT2:
         chipset = "Intel(R) Haswell Mobile";
         break;
      default:
         chipset = "Unknown Intel Chipset";
         break;
      }

      (void) driGetRendererString(buffer, chipset, 0);
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}

/**
 * If we have dirtied the (fake) front buffer of a winsys framebuffer, ask
 * the DRI2 loader to copy it to the real front buffer via flushFrontBuffer.
 * No-op for user FBOs, when nothing is dirty, or when the loader is too
 * old to provide the hook.
 */
static void
intel_flush_front(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);
   __DRIcontext *driContext = intel->driContext;
   __DRIscreen *const screen = intel->intelScreen->driScrnPriv;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && intel->front_buffer_dirty) {
      if (screen->dri2.loader &&
          (screen->dri2.loader->base.version >= 2)
          && (screen->dri2.loader->flushFrontBuffer != NULL) &&
          driContext->driDrawablePriv &&
          driContext->driDrawablePriv->loaderPrivate) {
         (*screen->dri2.loader->flushFrontBuffer)(driContext->driDrawablePriv,
                                                  driContext->driDrawablePriv->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         intel->front_buffer_dirty = false;
      }
   }
}

/* Bits per pixel of the renderbuffer's format (bytes * 8). */
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct intel_context *intel,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct intel_context *intel,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

/**
 * Query DRI2 for the drawable's current set of buffers and attach each one
 * to the matching renderbuffer of the drawable's framebuffer.  Also updates
 * the framebuffer size from the drawable afterwards.
 */
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   struct intel_context *intel = context->driverPrivate;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* If we're rendering to the fake front buffer, make sure all the
    * pending drawing has landed on the real front buffer.  Otherwise
    * when we eventually get to DRI2GetBuffersWithFormat the stale
    * real front buffer contents will get copied to the new fake front
    * buffer.
    */
   if (intel->is_front_buffer_rendering) {
      intel_flush(&intel->ctx);
      intel_flush_front(&intel->ctx);
   }

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate.
    */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(intel, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         /* NOTE(review): bailing here also skips the
          * driUpdateFramebufferSize() call below for the buffers already
          * processed in earlier iterations.
          */
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
   }

   driUpdateFramebufferSize(&intel->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct intel_context *intel)
{
   __DRIcontext *driContext = intel->driContext;
   __DRIdrawable *drawable;

   /* Re-fetch buffers for the draw drawable if its stamp changed since we
    * last validated against it.
    */
   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      intel_draw_buffer(&intel->ctx);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   /* Same for the read drawable. */
   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (intel->is_front_buffer_rendering)
      intel->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame.  At this point for
    * round trips for swap/copy and getting new buffers are done and
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
      drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;
      intel->need_throttle = false;
   }
}

/**
 * glViewport hook: invalidate the drawables when the loader can't deliver
 * invalidate events (see intelInitContext, where this is only installed in
 * that case).  Chains to the previously installed Viewport function.
 */
static void
intel_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
   struct intel_context *intel = intel_context(ctx);
   __DRIcontext *driContext = intel->driContext;

   if (intel->saved_viewport)
      intel->saved_viewport(ctx, x, y, w, h);

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

/* Mapping of INTEL_DEBUG environment-variable tokens to debug flag bits. */
static const struct dri_debug_control debug_control[] = {
   { "tex", DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "ioctl", DEBUG_IOCTL},
   { "blit", DEBUG_BLIT},
   { "mip", DEBUG_MIPTREE},
   { "fall", DEBUG_FALLBACKS},
   { "verb", DEBUG_VERBOSE},
   { "bat", DEBUG_BATCH},
   { "pix", DEBUG_PIXEL},
   { "buf", DEBUG_BUFMGR},
   { "reg", DEBUG_REGION},
   { "fbo", DEBUG_FBO},
   { "gs", DEBUG_GS},
   { "sync", DEBUG_SYNC},
   { "prim", DEBUG_PRIMS },
   { "vert", DEBUG_VERTS },
   { "dri", DEBUG_DRI },
   { "sf", DEBUG_SF },
   { "san", DEBUG_SANITY },
   { "sleep", DEBUG_SLEEP },
   { "stats", DEBUG_STATS },
   { "tile", DEBUG_TILE },
   { "wm", DEBUG_WM },
   { "urb", DEBUG_URB },
   { "vs", DEBUG_VS },
   { "clip", DEBUG_CLIP },
   { "aub", DEBUG_AUB },
   { NULL, 0 }
};


/**
 * UpdateState hook: propagate state changes to swrast/vbo and record them
 * in NewGLState for the hardware backend; also forwards to the per-chipset
 * vtbl hook when one is installed.
 */
static void
intelInvalidateState(struct gl_context * ctx, GLuint new_state)
{
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}

void
intel_flush_rendering_to_batch(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);

   /* Flush any in-progress software fallback rendering first. */
   if (intel->Fallback)
      _swrast_flush(ctx);

   /* Pre-gen4 hardware buffers vertices; emit them into the batch now. */
   if (intel->gen < 4)
      INTEL_FIREVERTICES(intel);
}

/**
 * Flush queued rendering and, if the batchbuffer has any contents, submit
 * it.  \p file / \p line identify the caller for batch debugging (callers
 * normally go through the intel_flush() macro).
 */
void
_intel_flush(struct gl_context *ctx, const char *file, int line)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush_rendering_to_batch(ctx);

   if (intel->batch.used)
      _intel_batchbuffer_flush(intel, file, line);
}

/**
 * glFlush hook: flush the batch and the front buffer, and arm swap
 * throttling when front-buffer rendering (glFlush marks a frame boundary
 * in that case — see intel_prepare_render).
 */
static void
intel_glFlush(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush(ctx);
   intel_flush_front(ctx);
   if (intel->is_front_buffer_rendering)
      intel->need_throttle = true;
}

/**
 * glFinish hook: flush everything, then block until the last submitted
 * batchbuffer has finished executing on the GPU.
 */
void
intelFinish(struct gl_context * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush(ctx);
   intel_flush_front(ctx);

   if (intel->batch.last_bo)
      drm_intel_bo_wait_rendering(intel->batch.last_bo);
}

/**
 * Populate the driver function table with the hooks shared by all Intel
 * chipsets, starting from Mesa's defaults.
 */
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitStateFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
}

/**
 * Shared context-creation path for the i915 and i965 drivers.
 *
 * Initializes the embedded gl_context, chipset identification fields,
 * driconf options, GL constants, the software rasterizer modules, and the
 * batchbuffer/FBO machinery.  Returns false (leaving the context
 * unusable) if there is no buffer manager or core Mesa init fails.
 */
bool
intelInitContext(struct intel_context *intel,
                 int api,
                 const struct gl_config * mesaVis,
                 __DRIcontext * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   struct gl_context *ctx = &intel->ctx;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *intelScreen = sPriv->driverPrivate;
   int bo_reuse_mode;
   struct gl_config visual;

   /* we can't do anything without a connection to the device */
   if (intelScreen->bufmgr == NULL)
      return false;

   /* Can't rely on invalidate events, fall back to glViewport hack */
   if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
      intel->saved_viewport = functions->Viewport;
      functions->Viewport = intel_viewport;
   }

   if (mesaVis == NULL) {
      memset(&visual, 0, sizeof visual);
      mesaVis = &visual;
   }

   if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      return false;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driContext = driContextPriv;
   intel->driFd = sPriv->fd;

   intel->gen = intelScreen->gen;

   /* Derive the GT level and chipset family flags from the device ID. */
   const int devID = intelScreen->deviceID;
   if (IS_SNB_GT1(devID) || IS_IVB_GT1(devID) || IS_HSW_GT1(devID))
      intel->gt = 1;
   else if (IS_SNB_GT2(devID) || IS_IVB_GT2(devID) || IS_HSW_GT2(devID))
      intel->gt = 2;
   else
      intel->gt = 0;

   if (IS_HASWELL(devID)) {
      intel->is_haswell = true;
   } else if (IS_G4X(devID)) {
      intel->is_g4x = true;
   } else if (IS_945(devID)) {
      intel->is_945 = true;
   }

   if (intel->gen >= 5) {
      intel->needs_ff_sync = true;
   }

   intel->has_separate_stencil = intel->intelScreen->hw_has_separate_stencil;
   intel->must_use_separate_stencil = intel->intelScreen->hw_must_use_separate_stencil;
   intel->has_hiz = intel->gen >= 6 && !intel->is_haswell;
   intel->has_llc = intel->intelScreen->hw_has_llc;
   intel->has_swizzling = intel->intelScreen->hw_has_swizzling;


   memset(&ctx->TextureFormatSupported,
          0, sizeof(ctx->TextureFormatSupported));

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       sPriv->myNum, (intel->gen >= 4) ? "i965" : "i915");
   if (intel->gen < 4)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = sizeof(intel->batch.map);

   intel->bufmgr = intelScreen->bufmgr;

   bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(intel->bufmgr);
      break;
   }

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   ctx->Const.MaxSamples = 1.0;

   if (intel->gen >= 6)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.StripTextureBorder = GL_TRUE;

   /* reinitialize the context point state.
    * It depend on constants in __struct gl_contextRec::Const
    */
   _mesa_init_point(ctx);

   if (intel->gen >= 4) {
      ctx->Const.MaxRenderbufferSize = 8192;
   } else {
      ctx->Const.MaxRenderbufferSize = 2048;
   }

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, false);
   _swrast_allow_vertex_fog(ctx, true);

   _mesa_meta_init(ctx);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (intel->gen >= 4)
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   intelInitExtensions(ctx);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, true);

   if (INTEL_DEBUG & DEBUG_AUB)
      drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);

   intel_batchbuffer_init(intel);

   intel_fbo_init(intel);

   intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
                                               "texture_tiling");
   intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");

   if (!driQueryOptionb(&intel->optionCache, "hiz")) {
      intel->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ.
       */
      if (intel->gen == 6)
         intel->has_separate_stencil = false;
   }

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   return true;
}

/**
 * Destroy the context: tear down in reverse order of intelInitContext,
 * releasing the chipset backend, rasterizer modules, batchbuffer, vertex
 * buffers, option cache, and finally the intel_context allocation itself.
 */
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      INTEL_FIREVERTICES(intel);

      /* Dump a final BMP in case the application doesn't call SwapBuffers */
      if (INTEL_DEBUG & DEBUG_AUB) {
         intel_batchbuffer_flush(intel);
         aub_dump_bmp(&intel->ctx);
      }

      _mesa_meta_free(&intel->ctx);

      intel->vtbl.destroy(intel);

      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0x0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel);

      free(intel->prim.vb);
      intel->prim.vb = NULL;
      drm_intel_bo_unreference(intel->prim.vb_bo);
      intel->prim.vb_bo = NULL;
      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;

      driDestroyOptionCache(&intel->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      _math_matrix_dtr(&intel->ViewportMatrix);

      ralloc_free(intel);
      driContextPriv->driverPrivate = NULL;
751 } 752} 753 754GLboolean 755intelUnbindContext(__DRIcontext * driContextPriv) 756{ 757 /* Unset current context and dispath table */ 758 _mesa_make_current(NULL, NULL, NULL); 759 760 return true; 761} 762 763GLboolean 764intelMakeCurrent(__DRIcontext * driContextPriv, 765 __DRIdrawable * driDrawPriv, 766 __DRIdrawable * driReadPriv) 767{ 768 struct intel_context *intel; 769 GET_CURRENT_CONTEXT(curCtx); 770 771 if (driContextPriv) 772 intel = (struct intel_context *) driContextPriv->driverPrivate; 773 else 774 intel = NULL; 775 776 /* According to the glXMakeCurrent() man page: "Pending commands to 777 * the previous context, if any, are flushed before it is released." 778 * But only flush if we're actually changing contexts. 779 */ 780 if (intel_context(curCtx) && intel_context(curCtx) != intel) { 781 _mesa_flush(curCtx); 782 } 783 784 if (driContextPriv) { 785 struct gl_framebuffer *fb, *readFb; 786 787 if (driDrawPriv == NULL && driReadPriv == NULL) { 788 fb = _mesa_get_incomplete_framebuffer(); 789 readFb = _mesa_get_incomplete_framebuffer(); 790 } else { 791 fb = driDrawPriv->driverPrivate; 792 readFb = driReadPriv->driverPrivate; 793 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1; 794 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1; 795 } 796 797 intel_prepare_render(intel); 798 _mesa_make_current(&intel->ctx, fb, readFb); 799 800 /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer 801 * is NULL at that point. We can't call _mesa_makecurrent() 802 * first, since we need the buffer size for the initial 803 * viewport. So just call intel_draw_buffer() again here. */ 804 intel_draw_buffer(&intel->ctx); 805 } 806 else { 807 _mesa_make_current(NULL, NULL, NULL); 808 } 809 810 return true; 811} 812 813/** 814 * \brief Query DRI2 to obtain a DRIdrawable's buffers. 815 * 816 * To determine which DRI buffers to request, examine the renderbuffers 817 * attached to the drawable's framebuffer. 
Then request the buffers with 818 * DRI2GetBuffers() or DRI2GetBuffersWithFormat(). 819 * 820 * This is called from intel_update_renderbuffers(). 821 * 822 * \param drawable Drawable whose buffers are queried. 823 * \param buffers [out] List of buffers returned by DRI2 query. 824 * \param buffer_count [out] Number of buffers returned. 825 * 826 * \see intel_update_renderbuffers() 827 * \see DRI2GetBuffers() 828 * \see DRI2GetBuffersWithFormat() 829 */ 830static void 831intel_query_dri2_buffers(struct intel_context *intel, 832 __DRIdrawable *drawable, 833 __DRIbuffer **buffers, 834 int *buffer_count) 835{ 836 __DRIscreen *screen = intel->intelScreen->driScrnPriv; 837 struct gl_framebuffer *fb = drawable->driverPrivate; 838 839 if (screen->dri2.loader 840 && screen->dri2.loader->base.version > 2 841 && screen->dri2.loader->getBuffersWithFormat != NULL) { 842 843 int i = 0; 844 const int max_attachments = 4; 845 unsigned *attachments = calloc(2 * max_attachments, sizeof(unsigned)); 846 847 struct intel_renderbuffer *front_rb; 848 struct intel_renderbuffer *back_rb; 849 850 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT); 851 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT); 852 853 if ((intel->is_front_buffer_rendering || 854 intel->is_front_buffer_reading || 855 !back_rb) && front_rb) { 856 attachments[i++] = __DRI_BUFFER_FRONT_LEFT; 857 attachments[i++] = intel_bits_per_pixel(front_rb); 858 } 859 860 if (back_rb) { 861 attachments[i++] = __DRI_BUFFER_BACK_LEFT; 862 attachments[i++] = intel_bits_per_pixel(back_rb); 863 } 864 865 assert(i <= 2 * max_attachments); 866 867 *buffers = screen->dri2.loader->getBuffersWithFormat(drawable, 868 &drawable->w, 869 &drawable->h, 870 attachments, i / 2, 871 buffer_count, 872 drawable->loaderPrivate); 873 free(attachments); 874 875 } else { 876 *buffers = NULL; 877 *buffer_count = 0; 878 } 879} 880 881/** 882 * \brief Assign a DRI buffer's DRM region to a renderbuffer. 
883 * 884 * This is called from intel_update_renderbuffers(). 885 * 886 * \par Note: 887 * DRI buffers whose attachment point is DRI2BufferStencil or 888 * DRI2BufferDepthStencil are handled as special cases. 889 * 890 * \param buffer_name is a human readable name, such as "dri2 front buffer", 891 * that is passed to intel_region_alloc_for_handle(). 892 * 893 * \see intel_update_renderbuffers() 894 * \see intel_region_alloc_for_handle() 895 */ 896static void 897intel_process_dri2_buffer(struct intel_context *intel, 898 __DRIdrawable *drawable, 899 __DRIbuffer *buffer, 900 struct intel_renderbuffer *rb, 901 const char *buffer_name) 902{ 903 struct intel_region *region = NULL; 904 905 if (!rb) 906 return; 907 908 if (rb->mt && 909 rb->mt->region && 910 rb->mt->region->name == buffer->name) 911 return; 912 913 if (unlikely(INTEL_DEBUG & DEBUG_DRI)) { 914 fprintf(stderr, 915 "attaching buffer %d, at %d, cpp %d, pitch %d\n", 916 buffer->name, buffer->attachment, 917 buffer->cpp, buffer->pitch); 918 } 919 920 intel_miptree_release(&rb->mt); 921 region = intel_region_alloc_for_handle(intel->intelScreen, 922 buffer->cpp, 923 drawable->w, 924 drawable->h, 925 buffer->pitch / buffer->cpp, 926 buffer->name, 927 buffer_name); 928 if (!region) 929 return; 930 931 rb->mt = intel_miptree_create_for_region(intel, 932 GL_TEXTURE_2D, 933 intel_rb_format(rb), 934 region); 935 intel_region_release(®ion); 936} 937