/* intel_context.c — revision a995bdced20a55759dffd901c10ec5fb251191cf */
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Core Mesa headers. */
#include "glheader.h"
#include "context.h"
#include "matrix.h"
#include "simple_list.h"
#include "extensions.h"
#include "framebuffer.h"
#include "imports.h"
#include "points.h"

/* Software rasterizer and TNL modules used for fallbacks. */
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"

#include "tnl/t_pipeline.h"
#include "tnl/t_vertex.h"

#include "drivers/common/driverfuncs.h"

#include "intel_screen.h"

#include "i830_dri.h"

#include "intel_chipset.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_decode.h"
#include "intel_bufmgr.h"

#include "drirenderbuffer.h"
#include "vblank.h"
#include "utils.h"
#include "xmlpool.h"            /* for symbolic values of enum-type options */

/* Global bitmask of enabled debug channels; parsed from the INTEL_DEBUG
 * environment variable (see debug_control[] below and intelInitContext()).
 * May be pre-defined at compile time, in which case this definition is
 * skipped.
 */
#ifndef INTEL_DEBUG
int INTEL_DEBUG = (0);
#endif

/* Request dispatch-table setup helpers from extension_helper.h for each
 * extension whose entry points we register in the tables below.
 */
#define need_GL_NV_point_sprite
#define need_GL_ARB_multisample
#define need_GL_ARB_point_parameters
#define need_GL_ARB_texture_compression
#define need_GL_ARB_vertex_buffer_object
#define need_GL_ARB_vertex_program
#define need_GL_ARB_window_pos
#define need_GL_ARB_occlusion_query
#define need_GL_EXT_blend_color
#define need_GL_EXT_blend_equation_separate
#define need_GL_EXT_blend_func_separate
#define need_GL_EXT_blend_minmax
#define need_GL_EXT_cull_vertex
#define need_GL_EXT_fog_coord
#define need_GL_EXT_framebuffer_object
#define need_GL_EXT_multi_draw_arrays
#define need_GL_EXT_secondary_color
#define need_GL_NV_vertex_program
#define need_GL_ATI_separate_stencil
#define need_GL_EXT_point_parameters
#define need_GL_VERSION_2_0
#define need_GL_VERSION_2_1
#define need_GL_ARB_shader_objects
#define need_GL_ARB_vertex_shader

#include "extension_helper.h"

/* Date string reported as part of GL_RENDERER via driGetRendererString(). */
#define DRIVER_DATE "20061102"

/**
 * Driver hook for glGetString().
 *
 * Returns the vendor string, or a renderer string built from the screen's
 * PCI device ID plus DRIVER_DATE.  Returns NULL for any other name so core
 * Mesa supplies the default.
 *
 * NOTE(review): the renderer string is built in a static buffer, so this is
 * not reentrant across contexts/threads — presumably acceptable under the
 * driver's locking model; confirm.
 */
static const GLubyte *
intelGetString(GLcontext * ctx, GLenum name)
{
   const char *chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) "Tungsten Graphics, Inc";
      break;                    /* NOTE(review): unreachable after return */

   case GL_RENDERER:
      /* Map the PCI device ID to a marketing name. */
      switch (intel_context(ctx)->intelScreen->deviceID) {
      case PCI_CHIP_845_G:
         chipset = "Intel(R) 845G";
         break;
      case PCI_CHIP_I830_M:
         chipset = "Intel(R) 830M";
         break;
      case PCI_CHIP_I855_GM:
         chipset = "Intel(R) 852GM/855GM";
         break;
      case PCI_CHIP_I865_G:
         chipset = "Intel(R) 865G";
         break;
      case PCI_CHIP_I915_G:
         chipset = "Intel(R) 915G";
         break;
      case PCI_CHIP_E7221_G:
         chipset = "Intel (R) E7221G (i915)";
         break;
      case PCI_CHIP_I915_GM:
         chipset = "Intel(R) 915GM";
         break;
      case PCI_CHIP_I945_G:
         chipset = "Intel(R) 945G";
         break;
      case PCI_CHIP_I945_GM:
         chipset = "Intel(R) 945GM";
         break;
      case PCI_CHIP_I945_GME:
         chipset = "Intel(R) 945GME";
         break;
      case PCI_CHIP_G33_G:
         chipset = "Intel(R) G33";
         break;
      case PCI_CHIP_Q35_G:
         chipset = "Intel(R) Q35";
         break;
      case PCI_CHIP_Q33_G:
         chipset = "Intel(R) Q33";
         break;
      case PCI_CHIP_I965_Q:
         chipset = "Intel(R) 965Q";
         break;
      case PCI_CHIP_I965_G:
      case PCI_CHIP_I965_G_1:
         chipset = "Intel(R) 965G";
         break;
      case PCI_CHIP_I946_GZ:
         chipset = "Intel(R) 946GZ";
         break;
      case PCI_CHIP_I965_GM:
         chipset = "Intel(R) 965GM";
         break;
      case PCI_CHIP_I965_GME:
         chipset = "Intel(R) 965GME/GLE";
         break;
      case PCI_CHIP_IGD_GM:
      case PCI_CHIP_IGD_E_G:
         chipset = "Intel(R) Integrated Graphics Device";
         break;
      case PCI_CHIP_G45_G:
         chipset = "Intel(R) G45/G43";
         break;
      case PCI_CHIP_Q45_G:
         chipset = "Intel(R) Q45/Q43";
         break;
      default:
         chipset = "Unknown Intel Chipset";
         break;
      }

      (void) driGetRendererString(buffer, chipset, DRIVER_DATE, 0);
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}

/**
 * Extension strings exported by the intel driver.
 *
 * Each entry pairs an extension name with the function-table initializer
 * generated by extension_helper.h (NULL when the extension adds no entry
 * points).
 *
 * \note
 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
 * old i830-specific driver.
 *
 * NOTE(review): GL_ARB_point_parameters is listed twice (once with its
 * function table, once with NULL) — presumably harmless but redundant.
 */
static const struct dri_extension card_extensions[] = {
   {"GL_ARB_multisample", GL_ARB_multisample_functions},
   {"GL_ARB_multitexture", NULL},
   {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
   {"GL_NV_point_sprite", GL_NV_point_sprite_functions},
   {"GL_ARB_texture_border_clamp", NULL},
   {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
   {"GL_ARB_texture_cube_map", NULL},
   {"GL_ARB_texture_env_add", NULL},
   {"GL_ARB_texture_env_combine", NULL},
   {"GL_ARB_texture_env_dot3", NULL},
   {"GL_ARB_texture_mirrored_repeat", NULL},
   {"GL_ARB_texture_non_power_of_two", NULL },
   {"GL_ARB_texture_rectangle", NULL},
   {"GL_NV_texture_rectangle", NULL},
   {"GL_EXT_texture_rectangle", NULL},
   {"GL_ARB_point_parameters", NULL},
   {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
   {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
   {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
   {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
   {"GL_EXT_blend_equation_separate",
    GL_EXT_blend_equation_separate_functions},
   {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
   {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
   {"GL_EXT_blend_logic_op", NULL},
   {"GL_EXT_blend_subtract", NULL},
   {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
   {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
   {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
   {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions},
#if 1                           /* XXX FBO temporary? */
   {"GL_EXT_packed_depth_stencil", NULL},
#endif
   {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
   {"GL_EXT_stencil_wrap", NULL},
   {"GL_EXT_texture_edge_clamp", NULL},
   {"GL_EXT_texture_env_combine", NULL},
   {"GL_EXT_texture_env_dot3", NULL},
   {"GL_EXT_texture_filter_anisotropic", NULL},
   {"GL_EXT_texture_lod_bias", NULL},
   {"GL_3DFX_texture_compression_FXT1", NULL},
   {"GL_APPLE_client_storage", NULL},
   {"GL_MESA_pack_invert", NULL},
   {"GL_MESA_ycbcr_texture", NULL},
   {"GL_NV_blend_square", NULL},
   {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
   {"GL_NV_vertex_program1_1", NULL},
   { "GL_SGIS_generate_mipmap", NULL },
   {NULL, NULL}
};

/** Extensions enabled only on 965-class (BRW) hardware. */
static const struct dri_extension brw_extensions[] = {
   { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions},
   { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions},
   { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions},
   { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions},
   { "GL_ARB_point_sprite", NULL},
   { "GL_ARB_fragment_shader", NULL },
   { "GL_ARB_draw_buffers", NULL },
   { "GL_ARB_depth_texture", NULL },
   { "GL_ARB_fragment_program", NULL },
   { "GL_ARB_shadow", NULL },
   { "GL_EXT_shadow_funcs", NULL },
   /* ARB extn won't work if not enabled */
   { "GL_SGIX_depth_texture", NULL },
   { "GL_ARB_texture_env_crossbar", NULL },
   { "GL_EXT_texture_sRGB", NULL},
   { NULL, NULL }
};

/** Occlusion-query extension, gated on DRM version (see intelInitExtensions). */
static const struct dri_extension arb_oc_extensions[] = {
   {"GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions},
   {NULL, NULL}
};

/** Extensions that require the kernel memory manager (GEM/TTM) path. */
static const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
   {NULL, NULL}
};

/**
 * Initializes potential list of extensions if ctx == NULL, or actually enables
 * extensions for a context.
 *
 * When ctx is NULL every table is registered unconditionally (so the
 * dispatch offsets exist); with a real context, the TTM, occlusion-query
 * and BRW tables are enabled only when the hardware/kernel supports them.
 */
void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
{
   struct intel_context *intel = ctx?intel_context(ctx):NULL;

   /* Disable imaging extension until convolution is working in teximage paths.
    */
   enable_imaging = GL_FALSE;

   driInitExtensions(ctx, card_extensions, enable_imaging);

   if (intel == NULL || intel->ttm)
      driInitExtensions(ctx, ttm_extensions, GL_FALSE);

   /* Occlusion queries need the MMIO ioctl: 965 hardware plus DRM >= 1.8. */
   if (intel == NULL ||
       (IS_965(intel->intelScreen->deviceID) &&
        intel->intelScreen->drmMinor >= 8))
      driInitExtensions(ctx, arb_oc_extensions, GL_FALSE);

   if (intel == NULL || IS_965(intel->intelScreen->deviceID))
      driInitExtensions(ctx, brw_extensions, GL_FALSE);
}

/* Mapping of INTEL_DEBUG environment-variable tokens to debug flag bits. */
static const struct dri_debug_control debug_control[] = {
   { "tex", DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "ioctl", DEBUG_IOCTL},
   { "blit", DEBUG_BLIT},
   { "mip", DEBUG_MIPTREE},
   { "fall", DEBUG_FALLBACKS},
   { "verb", DEBUG_VERBOSE},
   { "bat", DEBUG_BATCH},
   { "pix", DEBUG_PIXEL},
   { "buf", DEBUG_BUFMGR},
   { "reg", DEBUG_REGION},
   { "fbo", DEBUG_FBO},
   { "lock", DEBUG_LOCK},
   { "sync", DEBUG_SYNC},
   { "prim", DEBUG_PRIMS },
   { "vert", DEBUG_VERTS },
   { "dri", DEBUG_DRI },
   { "dma", DEBUG_DMA },
   { "san", DEBUG_SANITY },
   { "sleep", DEBUG_SLEEP },
   { "stats", DEBUG_STATS },
   { "tile", DEBUG_TILE },
   { "sing", DEBUG_SINGLE_THREAD },
   { "thre", DEBUG_SINGLE_THREAD },
   { "wm", DEBUG_WM },
   { "urb", DEBUG_URB },
   { "vs", DEBUG_VS },
   { NULL, 0 }
};


/**
 * Driver UpdateState hook: propagate GL state changes to the software
 * modules, accumulate them in intel->NewGLState, and forward to the
 * chip-specific vtbl hook when one is installed.
 */
static void
intelInvalidateState(GLcontext * ctx, GLuint new_state)
{
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState(ctx, new_state);
   _swsetup_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);
   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);

   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}


/**
 * Driver glFlush() hook: flush software-fallback rendering, fire any
 * queued vertices (pre-965 only), and submit the batchbuffer if it
 * contains commands.
 */
void
intelFlush(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   if (intel->Fallback)
      _swrast_flush(ctx);

   if (!IS_965(intel->intelScreen->deviceID))
      INTEL_FIREVERTICES(intel);

   /* Emit a flush so that any frontbuffer rendering that might have occurred
    * lands onscreen in a timely manner, even if the X Server doesn't trigger
    * a flush for us.
    */
   intel_batchbuffer_emit_mi_flush(intel->batch);

   /* Only submit if something has actually been written to the batch. */
   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);
}

/**
 * Driver glFinish() hook: flush, then block until rendering to every
 * color draw buffer's region has completed.
 */
void
intelFinish(GLcontext * ctx)
{
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   int i;

   intelFlush(ctx);

   for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb;

      irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb->region)
         dri_bo_wait_rendering(irb->region->buffer);
   }
   if (fb->_DepthBuffer) {
      /* XXX: Wait on buffer idle */
   }
}

/**
 * BeginQuery hook (occlusion queries): snapshot the PS_DEPTH_COUNT
 * hardware register into q->Result via the DRM MMIO-read ioctl.  The
 * stats_wm counter keeps statistics collection enabled in the WM while a
 * query is outstanding.
 */
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &q->Result
   };
   intel->stats_wm++;
   /* Drain all outstanding rendering so the counter reflects it. */
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
}

/**
 * EndQuery hook: read PS_DEPTH_COUNT again and report the delta since
 * intelBeginQuery() as the query result.
 */
static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   GLuint64EXT tmp;
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &tmp
   };
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
   q->Result = tmp - q->Result;
   q->Ready = GL_TRUE;
   intel->stats_wm--;
}

/** Driver-specific fence emit implementation for the fake memory manager. */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *)private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intel);

   return fence;
}

/** Driver-specific fence wait implementation for the fake memory manager. */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *)private;

   intelWaitIrq(intel, cookie);

   return 0;
}

/**
 * Choose and initialize a buffer manager for the context.
 *
 * Prefers the kernel GEM memory manager when the DDX/DRM are new enough
 * (or DRI2 is active) and INTEL_NO_GEM is not set; otherwise falls back
 * to the classic ("fake") bufmgr backed by the static texture pool.
 *
 * Returns GL_FALSE if no buffer manager could be initialized.
 */
static GLboolean
intel_init_bufmgr(struct intel_context *intel)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   GLboolean gem_disable = getenv("INTEL_NO_GEM") != NULL;
   GLboolean gem_supported;

   /* If we've got a new enough DDX that's initializing GEM and giving us
    * object handles for the shared buffers, use that.
    */
   intel->ttm = GL_FALSE;
   if (intel->intelScreen->driScrnPriv->dri2.enabled)
      gem_supported = GL_TRUE;
   else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
            intel->intelScreen->drmMinor >= 11 &&
            intel->intelScreen->front.bo_handle != -1)
      gem_supported = GL_TRUE;
   else
      gem_supported = GL_FALSE;

   if (!gem_disable && gem_supported) {
      int bo_reuse_mode;
      intel->bufmgr = intel_bufmgr_gem_init(intel->driFd,
                                            BATCH_SZ);
      if (intel->bufmgr != NULL)
         intel->ttm = GL_TRUE;

      /* NOTE(review): bo_reuse is applied even when intel_bufmgr_gem_init()
       * returned NULL; intel_bufmgr_gem_enable_reuse() would then receive a
       * NULL bufmgr — confirm that is safe or guard it.
       */
      bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
      switch (bo_reuse_mode) {
      case DRI_CONF_BO_REUSE_DISABLED:
         break;
      case DRI_CONF_BO_REUSE_ALL:
         intel_bufmgr_gem_enable_reuse(intel->bufmgr);
         break;
      }
   }
   /* Otherwise, use the classic buffer manager.
    */
   if (intel->bufmgr == NULL) {
      if (gem_disable) {
         fprintf(stderr, "GEM disabled.  Using classic.\n");
      } else {
         fprintf(stderr, "Failed to initialize GEM.  "
                 "Falling back to classic.\n");
      }

      if (intelScreen->tex.size == 0) {
         fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
                 __func__, __LINE__);
         return GL_FALSE;
      }

      intel->bufmgr = intel_bufmgr_fake_init(intelScreen->tex.offset,
                                             intelScreen->tex.map,
                                             intelScreen->tex.size,
                                             intel_fence_emit,
                                             intel_fence_wait,
                                             intel);
   }

   /* XXX bufmgr should be per-screen, not per-context */
   intelScreen->ttm = intel->ttm;

   return GL_TRUE;
}

/**
 * Populate the dd_function_table with the hooks shared by all intel
 * generations; chip-specific code layers its own hooks on top.
 */
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   /* Imaging-subset copies are serviced by swrast. */
   functions->CopyColorTable = _swrast_CopyColorTable;
   functions->CopyColorSubTable = _swrast_CopyColorSubTable;
   functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
   functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;

   functions->BeginQuery = intelBeginQuery;
   functions->EndQuery = intelEndQuery;

   intelInitTextureFuncs(functions);
   intelInitStateFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
}


/**
 * Shared context-creation path for i915 and i965.
 *
 * Initializes the Mesa context, wires the context to the DRI screen/SAREA,
 * parses driconf options, sets implementation limits, creates the software
 * fallback modules, and allocates the buffer manager and batchbuffer.
 *
 * Returns GL_FALSE (leaving partially-initialized state to be torn down by
 * the caller) on failure.
 */
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   /* SAREA is shared with the kernel/X server, hence volatile. */
   volatile struct drm_i915_sarea *saPriv = (struct drm_i915_sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
   int fthrottle_mode;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   /* Dri stuff */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = sPriv->lock;

   intel->width = intelScreen->width;
   intel->height = intelScreen->height;

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
                       IS_965(intelScreen->deviceID) ? "i965" : "i915");
   /* NOTE(review): 865G is limited to 4KB batches — presumably a hardware
    * restriction on that chipset; confirm against hardware docs.
    */
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depend on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;        /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (IS_965(intelScreen->deviceID))
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   _math_matrix_ctr(&intel->ViewportMatrix);

   /* 965 requires IRQ support in the kernel; bail out hard without it. */
   if (IS_965(intelScreen->deviceID) && !intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active.  Exiting\n");
      exit(1);
   }

   intelInitExtensions(ctx, GL_FALSE);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   /* Under DRI2 the buffers come from the loader instead of static regions. */
   if (!sPriv->dri2.enabled)
      intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   intel->tiling_swizzle_mode = driQueryOptioni(&intel->optionCache,
                                                "swizzle_mode");

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}

/**
 * Tear down a context: fire pending vertices, run the chip-specific
 * destructor, destroy the fallback modules, and release the batchbuffer,
 * Mesa context data and buffer manager.
 */
void
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      INTEL_FIREVERTICES(intel);

      intel->vtbl.destroy(intel);

      /* True when this context is the last reference to the share group. */
      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);
      free(intel->prim.vb);

      if (release_texture_heaps) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");
      }

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      dri_bufmgr_destroy(intel->bufmgr);
   }
}

/** DRI unbind hook — nothing to do for this driver. */
GLboolean
intelUnbindContext(__DRIcontextPrivate * driContextPriv)
{
   return GL_TRUE;
}

/**
 * Make a context current against the given draw/read drawables.
 *
 * Under the classic (non-DRI2) path this also attaches the static
 * front/back/depth regions to the window-system renderbuffers, resizes
 * the framebuffers to match the window, and initializes vblank state
 * the first time a drawable is bound.
 */
GLboolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
{
   __DRIscreenPrivate *psp = driDrawPriv->driScreenPriv;

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;


      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the context */
      if (!driContextPriv->driScreenPriv->dri2.enabled) {
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                          intel->front_region);
         }
         if (intel_fb->color_rb[1]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[1],
                                          intel->back_region);
         }
#if 0
         if (intel_fb->color_rb[2]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[2],
                                          intel->third_region);
         }
#endif
         if (irbDepth) {
            intel_renderbuffer_set_region(irbDepth, intel->depth_region);
         }
         /* Stencil shares the combined depth/stencil region. */
         if (irbStencil) {
            intel_renderbuffer_set_region(irbStencil, intel->depth_region);
         }
      }

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
         driUpdateFramebufferSize(&intel->ctx, driReadPriv);
      }

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {

         if (intel->driReadDrawable != driReadPriv)
            intel->driReadDrawable = driReadPriv;

         if (intel->driDrawable != driDrawPriv) {
            /* swap_interval == -1 marks a drawable whose vblank state has
             * not been initialized yet.
             */
            if (driDrawPriv->swap_interval == (unsigned)-1) {
               int i;

               driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
                  ? driGetDefaultVBlankFlags(&intel->optionCache)
                  : VBLANK_FLAG_NO_IRQ;

               (*psp->systemTime->getUST) (&intel_fb->swap_ust);
               driDrawableInitVBlank(driDrawPriv);
               intel_fb->vbl_waited = driDrawPriv->vblSeq;

               for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
                  if (intel_fb->color_rb[i])
                     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
               }
            }
            intel->driDrawable = driDrawPriv;
            intelWindowMoved(intel);
         }

         intel_draw_buffer(&intel->ctx, &intel_fb->Base);
      }
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}

/**
 * Slow path taken by LOCK_HARDWARE when the hardware lock was contended:
 * re-acquire the lock, then revalidate everything another context or the
 * server may have changed (drawable info, hardware context, fake-bufmgr
 * texture memory, screen size).
 */
static void
intelContendedLock(struct intel_context *intel, GLuint flags)
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   volatile struct drm_i915_sarea *sarea = intel->sarea;
   int me = intel->hHWContext;

   drmGetLock(intel->driFd, intel->hHWContext, flags);
   intel->locked = 1;

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - got contended lock\n", __progname);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   /* Another context used the hardware since we last held the lock;
    * claim ownership again.
    */
   if (sarea && sarea->ctxOwner != me) {
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
                 sarea->ctxOwner, me);
      }
      sarea->ctxOwner = me;
   }

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner.  We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      intel_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();
      if (INTEL_DEBUG & DEBUG_BUFMGR)
         fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
                 sarea->ctxOwner, intel->hHWContext);
   }

   /* Screen was resized while we were unlocked. */
   if (sarea->width != intel->width || sarea->height != intel->height) {
      int numClipRects = intel->numClipRects;

      /*
       * FIXME: Really only need to do this when drawing to a
       * common back- or front buffer.
       */

      /*
       * This will essentially drop the outstanding batchbuffer on
       * the floor.
       */
      intel->numClipRects = 0;

      if (intel->Fallback)
         _swrast_flush(&intel->ctx);

      if (!IS_965(intel->intelScreen->deviceID))
         INTEL_FIREVERTICES(intel);

      if (intel->batch->map != intel->batch->ptr)
         intel_batchbuffer_flush(intel->batch);

      intel->numClipRects = numClipRects;

      /* force window update */
      intel->lastStamp = 0;

      intel->width = sarea->width;
      intel->height = sarea->height;
   }

   /* Drawable changed?
    */
   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved(intel);
      intel->lastStamp = dPriv->lastStamp;
   }
}


/* Serializes LOCK_HARDWARE/UNLOCK_HARDWARE across threads in this process. */
_glthread_DECLARE_STATIC_MUTEX(lockMutex);

/* Lock the hardware and validate our state.
 *
 * Also throttles on vblank when the current drawable has a swap pending
 * (unless IRQ-less vblank is in use).  Takes the process-wide mutex, then
 * the DRM hardware lock via compare-and-swap, falling back to
 * intelContendedLock()/event parsing when the fast path fails.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
   __DRIdrawable *dPriv = intel->driDrawable;
   __DRIscreen *sPriv = intel->driScreen;
   char __ret = 0;
   struct intel_framebuffer *intel_fb = NULL;
   struct intel_renderbuffer *intel_rb = NULL;

   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);
   intel->locked = 1;

   if (intel->driDrawable) {
      intel_fb = intel->driDrawable->driverPrivate;

      if (intel_fb)
         intel_rb =
            intel_get_renderbuffer(&intel_fb->Base,
                                   intel_fb->Base._ColorDrawBufferIndexes[0]);
   }

   /* The (1<<23) window compares the sequence counters modulo wraparound:
    * wait only if vbl_pending is still ahead of what we last waited for.
    */
   if (intel_rb && dPriv->vblFlags &&
       !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
       (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {
      drmVBlank vbl;

      vbl.request.type = DRM_VBLANK_ABSOLUTE;

      if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
         vbl.request.type |= DRM_VBLANK_SECONDARY;
      }

      vbl.request.sequence = intel_rb->vbl_pending;
      drmWaitVBlank(intel->driFd, &vbl);
      intel_fb->vbl_waited = vbl.reply.sequence;
   }

   /* Fast path: try to grab the hardware lock without a syscall. */
   DRM_CAS(intel->driHwLock, intel->hHWContext,
           (DRM_LOCK_HELD|intel->hHWContext), __ret);

   if (sPriv->dri2.enabled) {
      if (__ret)
         drmGetLock(intel->driFd, intel->hHWContext, 0);
      if (__driParseEvents(dPriv->driContextPriv, dPriv)) {
         intelWindowMoved(intel);
         intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
      }
   } else if (__ret) {
      intelContendedLock( intel, 0 );
   }


   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - locked\n", __progname);
}


/* Unlock the hardware using the global current context
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - unlocked\n", __progname);

   /**
    * Nothing should be left in batch outside of LOCK/UNLOCK which references
    * cliprects.
    */
   if (intel->batch->cliprect_mode == REFERENCES_CLIPRECTS)
      intel_batchbuffer_flush(intel->batch);
}