brw_context.c revision 410fea8dd9cc0b1b500477a8b6f68c950f7c895a
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 8:
      samples[0] = 8;
      samples[1] = 4;
      samples[2] = 2;
      return 3;

   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      samples[0] = 1;
      return 1;
   }
}
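/* Note (reader aid): this hook backs the GL_NUM_SAMPLE_COUNTS / GL_SAMPLES
 * queries of glGetInternalformativ(), so a gen7 (Ivybridge-class) context
 * reports the two modes {8, 4} above, in descending order as that query
 * expects.
 */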
const char *const brw_vendor_string = "Intel Open Source Technology Center";

const char *
brw_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}

static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen->deviceID);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intelInvalidateState(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;
}

#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)
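/* The macro above simply selects whichever loader interface the screen was
 * created with: when an image loader is present, flushFront(screen)(drawable,
 * priv) ends up calling screen->image.loader->flushFrontBuffer(drawable,
 * priv); otherwise the DRI2 loader's flushFrontBuffer is used.
 */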
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->need_throttle = true;
}

void
intelFinish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = max_samplers;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = max_samplers;
   if (brw->gen >= 7)
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = max_samplers;
   else
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 0;
   if (_mesa_extension_override_enables.ARB_compute_shader) {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
      ctx->Const.MaxUniformBufferBindings += 12;
   } else {
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 0;
   }
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits +
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 12; /* 2048 */
   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxTextureMbytes = 1536;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }
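   /* A worked example of the clamping above, with made-up numbers: if
    * intel_supported_msaa_modes() returned {8, 4, 0} and the user set
    * clamp_max_samples=6 in driconf, the loop picks 4, the largest mode not
    * exceeding the clamp; when the option is negative we simply take
    * msaa_modes[0].
    */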
   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   if (brw->gen >= 7) {
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers = BRW_MAX_ABO;
      ctx->Const.MaxCombinedAtomicBuffers = 3 * BRW_MAX_ABO;
   }

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how it's actually done for vertex order, which
    * affects the provoking vertex decision. Always use the last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.UniformBooleanTrue = 1;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *     "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *      the base address of the first element of the surface, computed in
    *      software by adding the surface base address to the byte offset of
    *      the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;
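   /* A consequence worth noting: UniformBufferOffsetAlignment backs
    * GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, so glBindBufferRange() offsets for
    * UBOs must be multiples of 16 on this driver; likewise
    * TextureBufferOffsetAlignment backs GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT
    * for glTexBufferRange().
    */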
   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectOutput =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].EmitNoIndirectUniform = false;
      ctx->ShaderCompilerOptions[i].LowerClipDistance = true;
   }

   ctx->ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
   ctx->ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;

   /* ARB_viewport_array */
   if (brw->gen >= 7 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN7_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
}
/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
      brw->has_hiz = false;
      /* On gen6, you can only do separate stencil with HIZ. */
      if (brw->gen == 6)
         brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->disable_derivative_optimization =
      driQueryOptionb(&brw->optionCache, "disable_derivative_optimization");

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");
}
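/* For illustration only: the options consumed above come from the usual drirc
 * files parsed by driParseConfigFiles().  A user-level sketch of turning one
 * of them on in ~/.drirc might look like
 *
 *    <driconf>
 *      <device driver="i965">
 *        <application name="Default">
 *          <option name="always_flush_batch" value="true" />
 *        </application>
 *      </device>
 *    </driconf>
 *
 * (the schema is defined by the common dri option machinery, not this file).
 */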
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }
   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }
   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;
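   /* With the 256MB estimate above, this limit works out to 64MB; it is only
    * a heuristic consulted when deciding how to map large objects.
    */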
   if (brw->gen == 6)
      brw->urb.gen6_gs_previously_active = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}
/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_B8G8R8A8_SRGB) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_B8G8R8A8_UNORM;
      }
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL && driReadPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format.  We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
         BUFFER_BACK_LEFT,
         BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }

}
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate.
    */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (brw_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame.  At this point the
    * round trips for swap/copy and getting new buffers are done and
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_throttle && brw->first_post_swapbuffers_batch) {
      if (!brw->disable_throttling)
         drm_intel_bo_wait_rendering(brw->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
      brw->first_post_swapbuffers_batch = NULL;
      brw->need_throttle = false;
   }
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((brw_is_front_buffer_drawing(fb) ||
        brw_is_front_buffer_reading(fb) ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}
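/* The attachments[] array built above is a flat list of (attachment, bpp)
 * pairs, which is why the request passes i / 2 as the count; for a plain
 * double-buffered ARGB8888 visual with no front-buffer rendering it would
 * contain just { __DRI_BUFFER_BACK_LEFT, 32 }.
 */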
/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to drm_intel_bo_gem_create_from_name().
 *
 * \see intel_update_renderbuffers()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   drm_intel_bo *bo;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   uint32_t old_name = 0;
   if (last_mt) {
      /* The bo already has a name because the miptree was created by a
       * previous call to intel_process_dri2_buffer(). If a bo already has a
       * name, then drm_intel_bo_flink() is a low-cost getter.  It does not
       * create a new name.
       */
      drm_intel_bo_flink(last_mt->bo, &old_name);
   }

   if (old_name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                          buffer->name);
   if (!bo) {
      fprintf(stderr,
              "Failed to open BO for returned DRI2 buffer "
              "(%dx%d, %s, named %d).\n"
              "This is likely a bug in the X Server that will lead to a "
              "crash soon.\n",
              drawable->w, drawable->h, buffer_name, buffer->name);
      return;
   }

   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
                                            drawable->w, drawable->h,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(brw, rb);
   }

   assert(rb->mt);

   drm_intel_bo_unreference(bo);
}
/**
 * \brief Query DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers from
 * the image loader.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct brw_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;

   if (!rb || !buffer->bo)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* Check and see if we're already bound to the right
    * buffer object
    */
   struct intel_mipmap_tree *last_mt;
   if (num_samples == 0)
      last_mt = rb->mt;
   else
      last_mt = rb->singlesample_mt;

   if (last_mt && last_mt->bo == buffer->bo)
      return;

   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
                                            buffer->width, buffer->height,
                                            buffer->pitch);

   if (brw_is_front_buffer_drawing(fb) &&
       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
       rb->Base.Base.NumSamples > 1) {
      intel_renderbuffer_upsample(intel, rb);
   }
}

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (brw_is_front_buffer_drawing(fb) ||
                    brw_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   (*screen->image.loader->getBuffers) (drawable,
                                        driGLFormatToImageFormat(format),
                                        &drawable->dri2.stamp,
                                        drawable->loaderPrivate,
                                        buffer_mask,
                                        &images);

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(brw,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(brw,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}