/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/renderbuffer.h"
#include "main/framebuffer.h"

#include "tnl/tnl.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "swrast_setup/swrast_setup.h"

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tris.h"
#include "intel_fbo.h"
#include "intel_buffers.h"

#include "i915_reg.h"
#include "i915_context.h"

static void
i915_render_prevalidate(struct intel_context *intel)
{
   struct i915_context *i915 = i915_context(&intel->ctx);

   i915ValidateFragmentProgram(i915);
}

static void
i915_render_start(struct intel_context *intel)
{
   intel_prepare_render(intel);
}


static void
i915_reduced_primitive_state(struct intel_context *intel, GLenum rprim)
{
   struct i915_context *i915 = i915_context(&intel->ctx);
   GLuint st1 = i915->state.Stipple[I915_STPREG_ST1];

   st1 &= ~ST1_ENABLE;

   switch (rprim) {
   case GL_QUADS: /* from RASTERIZE(GL_QUADS) in t_dd_tritemp.h */
   case GL_TRIANGLES:
      if (intel->ctx.Polygon.StippleFlag && intel->hw_stipple)
         st1 |= ST1_ENABLE;
      break;
   case GL_LINES:
   case GL_POINTS:
   default:
      break;
   }

   i915->intel.reduced_primitive = rprim;

   if (st1 != i915->state.Stipple[I915_STPREG_ST1]) {
      INTEL_FIREVERTICES(intel);

      I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
      i915->state.Stipple[I915_STPREG_ST1] = st1;
   }
}

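/* Illustrative example (not part of the driver): polygon stipple only
 * applies to filled reduced primitives.  If TNL reduces GL_POLYGON to
 * GL_TRIANGLES while glPolygonStipple state is enabled and the chip can
 * stipple in hardware, ST1_ENABLE gets set; a later switch to GL_LINES
 * clears the bit, which fires the queued vertices and flags
 * I915_UPLOAD_STIPPLE so the new ST1 value reaches the hardware:
 *
 *    i915_reduced_primitive_state(intel, GL_TRIANGLES); // ST1_ENABLE set
 *    i915_reduced_primitive_state(intel, GL_LINES);     // cleared + upload
 */
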
/* Pull apart the vertex format registers and figure out how large a
 * vertex is supposed to be.
 */
static bool
i915_check_vertex_size(struct intel_context *intel, GLuint expected)
{
   struct i915_context *i915 = i915_context(&intel->ctx);
   int lis2 = i915->state.Ctx[I915_CTXREG_LIS2];
   int lis4 = i915->state.Ctx[I915_CTXREG_LIS4];
   int i, sz = 0;

   switch (lis4 & S4_VFMT_XYZW_MASK) {
   case S4_VFMT_XY:
      sz = 2;
      break;
   case S4_VFMT_XYZ:
      sz = 3;
      break;
   case S4_VFMT_XYW:
      sz = 3;
      break;
   case S4_VFMT_XYZW:
      sz = 4;
      break;
   default:
      fprintf(stderr, "no xyzw specified\n");
      return false;
   }

   if (lis4 & S4_VFMT_SPEC_FOG)
      sz++;
   if (lis4 & S4_VFMT_COLOR)
      sz++;
   if (lis4 & S4_VFMT_DEPTH_OFFSET)
      sz++;
   if (lis4 & S4_VFMT_POINT_WIDTH)
      sz++;
   if (lis4 & S4_VFMT_FOG_PARAM)
      sz++;

   for (i = 0; i < 8; i++) {
      switch (lis2 & S2_TEXCOORD_FMT0_MASK) {
      case TEXCOORDFMT_2D:
         sz += 2;
         break;
      case TEXCOORDFMT_3D:
         sz += 3;
         break;
      case TEXCOORDFMT_4D:
         sz += 4;
         break;
      case TEXCOORDFMT_1D:
         sz += 1;
         break;
      case TEXCOORDFMT_2D_16:
         sz += 1;
         break;
      case TEXCOORDFMT_4D_16:
         sz += 2;
         break;
      case TEXCOORDFMT_NOT_PRESENT:
         break;
      default:
         fprintf(stderr, "bad texcoord fmt %d\n", i);
         return false;
      }
      lis2 >>= S2_TEXCOORD_FMT1_SHIFT;
   }

   if (sz != expected)
      fprintf(stderr, "vertex size mismatch %d/%d\n", sz, expected);

   return sz == expected;
}

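/* Worked example (illustrative): with S4_VFMT_XYZW plus S4_VFMT_COLOR in
 * LIS4 and a single TEXCOORDFMT_2D set in LIS2, the vertex occupies
 * 4 (xyzw) + 1 (color) + 2 (texcoord) = 7 dwords, so
 * i915_check_vertex_size(intel, 7) returns true, and any other "expected"
 * value logs a "vertex size mismatch" to stderr.
 */
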
static void
i915_emit_invarient_state(struct intel_context *intel)
{
   BATCH_LOCALS;

   BEGIN_BATCH(17);

   OUT_BATCH(_3DSTATE_AA_CMD |
             AA_LINE_ECAAR_WIDTH_ENABLE |
             AA_LINE_ECAAR_WIDTH_1_0 |
             AA_LINE_REGION_WIDTH_ENABLE | AA_LINE_REGION_WIDTH_1_0);

   OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
   OUT_BATCH(0);

   OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD);
   OUT_BATCH(0);

   OUT_BATCH(_3DSTATE_DFLT_Z_CMD);
   OUT_BATCH(0);

   /* Don't support texture crossbar yet */
   OUT_BATCH(_3DSTATE_COORD_SET_BINDINGS |
             CSB_TCB(0, 0) |
             CSB_TCB(1, 1) |
             CSB_TCB(2, 2) |
             CSB_TCB(3, 3) |
             CSB_TCB(4, 4) | CSB_TCB(5, 5) | CSB_TCB(6, 6) | CSB_TCB(7, 7));

   /* Need to initialize this to zero.
    */
   OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(3) | (0));
   OUT_BATCH(0);

   OUT_BATCH(_3DSTATE_SCISSOR_RECT_0_CMD);
   OUT_BATCH(0);
   OUT_BATCH(0);

   /* XXX: Use this */
   OUT_BATCH(_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);

   OUT_BATCH(_3DSTATE_DEPTH_SUBRECT_DISABLE);

   OUT_BATCH(_3DSTATE_LOAD_INDIRECT | 0); /* disable indirect state */
   OUT_BATCH(0);

   ADVANCE_BATCH();
}


#define emit(intel, state, size)                \
   intel_batchbuffer_data(intel, state, size)

static GLuint
get_dirty(struct i915_hw_state *state)
{
   GLuint dirty;

   /* Work around the multitex hang - if one texture unit's state is
    * modified, emit all texture units.
    */
   dirty = state->active & ~state->emitted;
   if (dirty & I915_UPLOAD_TEX_ALL)
      state->emitted &= ~I915_UPLOAD_TEX_ALL;
   dirty = state->active & ~state->emitted;
   return dirty;
}


static GLuint
get_state_size(struct i915_hw_state *state)
{
   GLuint dirty = get_dirty(state);
   GLuint i;
   GLuint sz = 0;

   if (dirty & I915_UPLOAD_INVARIENT)
      sz += 30 * 4;

   if (dirty & I915_UPLOAD_RASTER_RULES)
      sz += sizeof(state->RasterRules);

   if (dirty & I915_UPLOAD_CTX)
      sz += sizeof(state->Ctx);

   if (dirty & I915_UPLOAD_BLEND)
      sz += sizeof(state->Blend);

   if (dirty & I915_UPLOAD_BUFFERS)
      sz += sizeof(state->Buffer);

   if (dirty & I915_UPLOAD_STIPPLE)
      sz += sizeof(state->Stipple);

   if (dirty & I915_UPLOAD_TEX_ALL) {
      int nr = 0;
      for (i = 0; i < I915_TEX_UNITS; i++)
         if (dirty & I915_UPLOAD_TEX(i))
            nr++;

      sz += (2 + nr * 3) * sizeof(GLuint) * 2;
   }

   if (dirty & I915_UPLOAD_CONSTANTS)
      sz += state->ConstantSize * sizeof(GLuint);

   if (dirty & I915_UPLOAD_PROGRAM)
      sz += state->ProgramSize * sizeof(GLuint);

   return sz;
}

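/* Illustrative sketch of the dirty-bit bookkeeping (assuming the usual
 * i915_context.h macros, where I915_STATECHANGE() clears a group's
 * "emitted" bit and I915_ACTIVESTATE() toggles its "active" bit):
 * get_dirty() reports groups that are active but not yet emitted, and
 * the multitex workaround above re-dirties every texture unit as soon
 * as any one of them changes.
 *
 *    I915_STATECHANGE(i915, I915_UPLOAD_CTX);  // emitted &= ~CTX
 *    dirty = get_dirty(&i915->state);          // I915_UPLOAD_CTX is set
 *    // ... i915_emit_state() then does: state->emitted |= dirty;
 */
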
/* Push the state into the batchbuffer.
 */
static void
i915_emit_state(struct intel_context *intel)
{
   struct i915_context *i915 = i915_context(&intel->ctx);
   struct i915_hw_state *state = &i915->state;
   int i, count, aper_count;
   GLuint dirty;
   drm_intel_bo *aper_array[3 + I915_TEX_UNITS];
   GET_CURRENT_CONTEXT(ctx);
   BATCH_LOCALS;

   /* We don't hold the lock at this point, so want to make sure that
    * there won't be a buffer wrap between the state emits and the primitive
    * emit header.
    *
    * It might be better to talk about explicit places where
    * scheduling is allowed, rather than assume that it is whenever a
    * batchbuffer fills up.
    */
   intel_batchbuffer_require_space(intel,
                                   get_state_size(state) +
                                   INTEL_PRIM_EMIT_SIZE);
   count = 0;
 again:
   if (intel->batch.bo == NULL) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "i915 emit state");
      assert(0);
   }
   aper_count = 0;
   dirty = get_dirty(state);

   aper_array[aper_count++] = intel->batch.bo;
   if (dirty & I915_UPLOAD_BUFFERS) {
      if (state->draw_region)
         aper_array[aper_count++] = state->draw_region->bo;
      if (state->depth_region)
         aper_array[aper_count++] = state->depth_region->bo;
   }

   if (dirty & I915_UPLOAD_TEX_ALL) {
      for (i = 0; i < I915_TEX_UNITS; i++) {
         if (dirty & I915_UPLOAD_TEX(i)) {
            if (state->tex_buffer[i]) {
               aper_array[aper_count++] = state->tex_buffer[i];
            }
         }
      }
   }

   if (dri_bufmgr_check_aperture_space(aper_array, aper_count)) {
      if (count == 0) {
         count++;
         intel_batchbuffer_flush(intel);
         goto again;
      } else {
         _mesa_error(ctx, GL_OUT_OF_MEMORY, "i915 emit state");
         assert(0);
      }
   }

   /* work out list of buffers to emit */

   /* Do this here as we may have flushed the batchbuffer above,
    * causing more state to be dirty!
    */
   dirty = get_dirty(state);
   state->emitted |= dirty;
   assert(get_dirty(state) == 0);

   if (INTEL_DEBUG & DEBUG_STATE)
      fprintf(stderr, "%s dirty: %x\n", __func__, dirty);

   if (dirty & I915_UPLOAD_INVARIENT) {
      if (INTEL_DEBUG & DEBUG_STATE)
         fprintf(stderr, "I915_UPLOAD_INVARIENT:\n");
      i915_emit_invarient_state(intel);
   }

   if (dirty & I915_UPLOAD_RASTER_RULES) {
      if (INTEL_DEBUG & DEBUG_STATE)
         fprintf(stderr, "I915_UPLOAD_RASTER_RULES:\n");
      emit(intel, state->RasterRules, sizeof(state->RasterRules));
   }

   if (dirty & I915_UPLOAD_CTX) {
      if (INTEL_DEBUG & DEBUG_STATE)
         fprintf(stderr, "I915_UPLOAD_CTX:\n");

      emit(intel, state->Ctx, sizeof(state->Ctx));
   }

   if (dirty & I915_UPLOAD_BLEND) {
      if (INTEL_DEBUG & DEBUG_STATE)
         fprintf(stderr, "I915_UPLOAD_BLEND:\n");

      emit(intel, state->Blend, sizeof(state->Blend));
   }

   if (dirty & I915_UPLOAD_BUFFERS) {
      GLuint count;

      if (INTEL_DEBUG & DEBUG_STATE)
         fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");

      count = 17;
      if (state->Buffer[I915_DESTREG_DRAWRECT0] != MI_NOOP)
         count++;

      BEGIN_BATCH(count);
      OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR0]);
      OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR1]);
      if (state->draw_region) {
         OUT_RELOC(state->draw_region->bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
      } else {
         OUT_BATCH(0);
      }

      OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR0]);
      OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR1]);
      if (state->depth_region) {
         OUT_RELOC(state->depth_region->bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
      } else {
         OUT_BATCH(0);
      }

      OUT_BATCH(state->Buffer[I915_DESTREG_DV0]);
      OUT_BATCH(state->Buffer[I915_DESTREG_DV1]);
      OUT_BATCH(state->Buffer[I915_DESTREG_SR0]);
      OUT_BATCH(state->Buffer[I915_DESTREG_SR1]);
      OUT_BATCH(state->Buffer[I915_DESTREG_SR2]);
      OUT_BATCH(state->Buffer[I915_DESTREG_SENABLE]);

      if (state->Buffer[I915_DESTREG_DRAWRECT0] != MI_NOOP)
         OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT0]);
      OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT1]);
      OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT2]);
      OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT3]);
      OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT4]);
      OUT_BATCH(state->Buffer[I915_DESTREG_DRAWRECT5]);

      ADVANCE_BATCH();
   }

   if (dirty & I915_UPLOAD_STIPPLE) {
      if (INTEL_DEBUG & DEBUG_STATE)
         fprintf(stderr, "I915_UPLOAD_STIPPLE:\n");
      emit(intel, state->Stipple, sizeof(state->Stipple));
   }

   /* Combine all the dirty texture state into a single command to
    * avoid lockups on I915 hardware.
    */
   if (dirty & I915_UPLOAD_TEX_ALL) {
      int nr = 0;
      GLuint unwind;

      for (i = 0; i < I915_TEX_UNITS; i++)
         if (dirty & I915_UPLOAD_TEX(i))
            nr++;

      BEGIN_BATCH(2 + nr * 3);
      OUT_BATCH(_3DSTATE_MAP_STATE | (3 * nr));
      OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
      for (i = 0; i < I915_TEX_UNITS; i++)
         if (dirty & I915_UPLOAD_TEX(i)) {
            OUT_RELOC(state->tex_buffer[i],
                      I915_GEM_DOMAIN_SAMPLER, 0,
                      state->tex_offset[i]);

            OUT_BATCH(state->Tex[i][I915_TEXREG_MS3]);
            OUT_BATCH(state->Tex[i][I915_TEXREG_MS4]);
         }
      ADVANCE_BATCH();

      unwind = intel->batch.used;
      BEGIN_BATCH(2 + nr * 3);
      OUT_BATCH(_3DSTATE_SAMPLER_STATE | (3 * nr));
      OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
      for (i = 0; i < I915_TEX_UNITS; i++)
         if (dirty & I915_UPLOAD_TEX(i)) {
            OUT_BATCH(state->Tex[i][I915_TEXREG_SS2]);
            OUT_BATCH(state->Tex[i][I915_TEXREG_SS3]);
            OUT_BATCH(state->Tex[i][I915_TEXREG_SS4]);
         }
      ADVANCE_BATCH();
      if (i915->last_sampler &&
          memcmp(intel->batch.map + i915->last_sampler,
                 intel->batch.map + unwind,
                 (2 + nr*3)*sizeof(int)) == 0)
         intel->batch.used = unwind;
      else
         i915->last_sampler = unwind;
   }

   if (dirty & I915_UPLOAD_CONSTANTS) {
      if (INTEL_DEBUG & DEBUG_STATE)
         fprintf(stderr, "I915_UPLOAD_CONSTANTS:\n");
      emit(intel, state->Constant, state->ConstantSize * sizeof(GLuint));
   }

   if (dirty & I915_UPLOAD_PROGRAM) {
      if (state->ProgramSize) {
         if (INTEL_DEBUG & DEBUG_STATE)
            fprintf(stderr, "I915_UPLOAD_PROGRAM:\n");

         assert((state->Program[0] & 0x1ff) + 2 == state->ProgramSize);

         emit(intel, state->Program, state->ProgramSize * sizeof(GLuint));
         if (INTEL_DEBUG & DEBUG_STATE)
            i915_disassemble_program(state->Program, state->ProgramSize);
      }
   }

   assert(get_dirty(state) == 0);
}

static void
i915_destroy_context(struct intel_context *intel)
{
   GLuint i;
   struct i915_context *i915 = i915_context(&intel->ctx);

   intel_region_release(&i915->state.draw_region);
   intel_region_release(&i915->state.depth_region);

   for (i = 0; i < I915_TEX_UNITS; i++) {
      if (i915->state.tex_buffer[i] != NULL) {
         drm_intel_bo_unreference(i915->state.tex_buffer[i]);
         i915->state.tex_buffer[i] = NULL;
      }
   }

   _tnl_free_vertices(&intel->ctx);
}

void
i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
                             uint32_t buffer_id)
{
   state[0] = _3DSTATE_BUF_INFO_CMD;
   state[1] = buffer_id;

   if (region != NULL) {
      state[1] |= BUF_3D_PITCH(region->pitch);

      if (region->tiling != I915_TILING_NONE) {
         state[1] |= BUF_3D_TILED_SURFACE;
         if (region->tiling == I915_TILING_Y)
            state[1] |= BUF_3D_TILE_WALK_Y;
      }
   } else {
      /* Fill in a default pitch, since 0 is invalid.  We'll be
       * setting the buffer offset to 0 and not referencing the
       * buffer, so the pitch could really be any valid value.
       */
      state[1] |= BUF_3D_PITCH(4096);
   }
}

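/* Usage sketch (illustrative): the draw-region setup below fills a pair
 * of Buffer[] dwords per target, e.g.
 *
 *    i915_set_buf_info_for_region(&state->Buffer[I915_DESTREG_CBUFADDR0],
 *                                 color_regions[0], BUF_3D_ID_COLOR_BACK);
 *
 * writes _3DSTATE_BUF_INFO_CMD into CBUFADDR0 and the buffer id, pitch
 * and tiling flags into CBUFADDR1; the buffer address itself is emitted
 * separately as a relocation in i915_emit_state().
 */
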
static uint32_t i915_render_target_format_for_mesa_format[MESA_FORMAT_COUNT] =
{
   [MESA_FORMAT_B8G8R8A8_UNORM] = DV_PF_8888,
   [MESA_FORMAT_B8G8R8X8_UNORM] = DV_PF_8888,
   [MESA_FORMAT_B5G6R5_UNORM] = DV_PF_565 | DITHER_FULL_ALWAYS,
   [MESA_FORMAT_B5G5R5A1_UNORM] = DV_PF_1555 | DITHER_FULL_ALWAYS,
   [MESA_FORMAT_B4G4R4A4_UNORM] = DV_PF_4444 | DITHER_FULL_ALWAYS,
};

static bool
i915_render_target_supported(struct intel_context *intel,
                             struct gl_renderbuffer *rb)
{
   mesa_format format = rb->Format;

   if (format == MESA_FORMAT_Z24_UNORM_S8_UINT ||
       format == MESA_FORMAT_Z24_UNORM_X8_UINT ||
       format == MESA_FORMAT_Z_UNORM16) {
      return true;
   }

   return i915_render_target_format_for_mesa_format[format] != 0;
}

static void
i915_set_draw_region(struct intel_context *intel,
                     struct intel_region *color_regions[],
                     struct intel_region *depth_region,
                     GLuint num_regions)
{
   struct i915_context *i915 = i915_context(&intel->ctx);
   struct gl_context *ctx = &intel->ctx;
   struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_renderbuffer *drb;
   struct intel_renderbuffer *idrb = NULL;
   GLuint value;
   struct i915_hw_state *state = &i915->state;
   uint32_t draw_x, draw_y, draw_offset;

   if (state->draw_region != color_regions[0]) {
      intel_region_reference(&state->draw_region, color_regions[0]);
   }
   if (state->depth_region != depth_region) {
      intel_region_reference(&state->depth_region, depth_region);
   }

   /*
    * Set stride/cpp values
    */
   i915_set_buf_info_for_region(&state->Buffer[I915_DESTREG_CBUFADDR0],
                                color_regions[0], BUF_3D_ID_COLOR_BACK);

   i915_set_buf_info_for_region(&state->Buffer[I915_DESTREG_DBUFADDR0],
                                depth_region, BUF_3D_ID_DEPTH);

   /*
    * Compute/set I915_DESTREG_DV1 value
    */
   value = (DSTORG_HORT_BIAS(0x8) |     /* .5 */
            DSTORG_VERT_BIAS(0x8) |     /* .5 */
            LOD_PRECLAMP_OGL | TEX_DEFAULT_COLOR_OGL);
   if (irb != NULL) {
      value |= i915_render_target_format_for_mesa_format[intel_rb_format(irb)];
   } else {
      value |= DV_PF_8888;
   }

   /* This isn't quite safe, thus being hidden behind an option.  When changing
    * the value of this bit, the pipeline needs to be MI_FLUSHed.  And it
    * can only be set when a depth buffer is already defined.
    */
   if (intel->is_945 && intel->use_early_z &&
       depth_region && depth_region->tiling != I915_TILING_NONE)
      value |= CLASSIC_EARLY_DEPTH;

   if (depth_region && depth_region->cpp == 4) {
      value |= DEPTH_FRMT_24_FIXED_8_OTHER;
   }
   else {
      value |= DEPTH_FRMT_16_FIXED;
   }
   state->Buffer[I915_DESTREG_DV1] = value;

   drb = ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Renderbuffer;
   if (!drb)
      drb = ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Renderbuffer;

   if (drb)
      idrb = intel_renderbuffer(drb);

   /* We set up the drawing rectangle to be offset into the color
    * region's location in the miptree.  If it doesn't match with
    * depth's offsets, we can't render to it.
    *
    * (Well, not actually true -- the hw grew a bit to let depth's
    * offset get forced to 0,0.  We may want to use that if people are
    * hitting that case.  Also, some configurations may be supportable
    * by tweaking the start offset of the buffers around, which we
    * can't do in general due to tiling.)
    */
   FALLBACK(intel, I915_FALLBACK_DRAW_OFFSET,
            idrb && irb && (idrb->draw_x != irb->draw_x ||
                            idrb->draw_y != irb->draw_y));

   if (irb) {
      draw_x = irb->draw_x;
      draw_y = irb->draw_y;
   } else if (idrb) {
      draw_x = idrb->draw_x;
      draw_y = idrb->draw_y;
   } else {
      draw_x = 0;
      draw_y = 0;
   }

   draw_offset = (draw_y << 16) | draw_x;

   FALLBACK(intel, I915_FALLBACK_DRAW_OFFSET,
            (ctx->DrawBuffer->Width + draw_x > 2048) ||
            (ctx->DrawBuffer->Height + draw_y > 2048));
   /* When changing the drawing rectangle offset, an MI_FLUSH is required
    * first.
    */
   if (draw_offset != i915->last_draw_offset) {
      state->Buffer[I915_DESTREG_DRAWRECT0] =
         MI_FLUSH | INHIBIT_FLUSH_RENDER_CACHE;
      i915->last_draw_offset = draw_offset;
   } else
      state->Buffer[I915_DESTREG_DRAWRECT0] = MI_NOOP;

   state->Buffer[I915_DESTREG_DRAWRECT1] = _3DSTATE_DRAWRECT_INFO;
   state->Buffer[I915_DESTREG_DRAWRECT2] = 0;
   state->Buffer[I915_DESTREG_DRAWRECT3] = draw_offset;
   state->Buffer[I915_DESTREG_DRAWRECT4] =
      ((ctx->DrawBuffer->Width + draw_x - 1) & 0xffff) |
      ((ctx->DrawBuffer->Height + draw_y - 1) << 16);
   state->Buffer[I915_DESTREG_DRAWRECT5] = draw_offset;

   I915_STATECHANGE(i915, I915_UPLOAD_BUFFERS);
}

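/* Illustrative encoding example: the drawing-rectangle origin packs as
 * (draw_y << 16) | draw_x, so a color buffer at x=16, y=8 within its
 * miptree gives draw_offset = 0x00080010, and DRAWRECT4 packs the
 * inclusive lower-right corner the same way (width/height biased by -1).
 */
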
static void
i915_update_color_write_enable(struct i915_context *i915, bool enable)
{
   uint32_t dw = i915->state.Ctx[I915_CTXREG_LIS6];
   if (enable)
      dw |= S6_COLOR_WRITE_ENABLE;
   else
      dw &= ~S6_COLOR_WRITE_ENABLE;
   if (dw != i915->state.Ctx[I915_CTXREG_LIS6]) {
      I915_STATECHANGE(i915, I915_UPLOAD_CTX);
      i915->state.Ctx[I915_CTXREG_LIS6] = dw;
   }
}

/**
 * Update the hardware state for drawing into a window or framebuffer object.
 *
 * Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
 * places within the driver.
 *
 * Basically, this needs to be called any time the current framebuffer
 * changes, the renderbuffers change, or we need to draw into different
 * color buffers.
 */
static void
i915_update_draw_buffer(struct intel_context *intel)
{
   struct i915_context *i915 = (struct i915_context *)intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_region *colorRegion = NULL, *depthRegion = NULL;
   struct intel_renderbuffer *irbDepth = NULL, *irbStencil = NULL;

   if (!fb) {
      /* this can happen during the initial context initialization */
      return;
   }

   irbDepth = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   irbStencil = intel_get_renderbuffer(fb, BUFFER_STENCIL);

   /* Do this here, not in core Mesa, since this function is called from
    * many places within the driver.
    */
   if (ctx->NewState & _NEW_BUFFERS) {
      /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
      _mesa_update_framebuffer(ctx, ctx->ReadBuffer, ctx->DrawBuffer);
      /* this updates the DrawBuffer's Width/Height if it's a FBO */
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
   }

   if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
      /* this may occur when we're called by glBindFramebuffer() during
       * the process of someone setting up renderbuffers, etc.
       */
      /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
      return;
   }

   /* How many color buffers are we drawing into?
    *
    * If there is more than one drawbuffer (GL_FRONT_AND_BACK), or the
    * drawbuffers are too big, we have to fall back to software.
    */
   if ((fb->Width > ctx->Const.MaxRenderbufferSize)
       || (fb->Height > ctx->Const.MaxRenderbufferSize)) {
      FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, true);
   } else if (fb->_NumColorDrawBuffers > 1) {
      FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, true);
   } else {
      struct intel_renderbuffer *irb;
      irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
      colorRegion = (irb && irb->mt) ? irb->mt->region : NULL;
      FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, false);
   }

   /* Check for depth fallback. */
   if (irbDepth && irbDepth->mt) {
      FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
      depthRegion = irbDepth->mt->region;
   } else if (irbDepth && !irbDepth->mt) {
      FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, true);
      depthRegion = NULL;
   } else { /* !irbDepth */
      /* No fallback is needed because there is no depth buffer. */
      FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
      depthRegion = NULL;
   }

   /* Check for stencil fallback. */
   if (irbStencil && irbStencil->mt) {
      assert(intel_rb_format(irbStencil) == MESA_FORMAT_Z24_UNORM_S8_UINT);
      FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
   } else if (irbStencil && !irbStencil->mt) {
      FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, true);
   } else { /* !irbStencil */
      /* No fallback is needed because there is no stencil buffer. */
      FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
   }

   /* If we have a (packed) stencil buffer attached but no depth buffer,
    * we still need to set up the shared depth/stencil state so we can use it.
    */
   if (depthRegion == NULL && irbStencil && irbStencil->mt
       && intel_rb_format(irbStencil) == MESA_FORMAT_Z24_UNORM_S8_UINT) {
      depthRegion = irbStencil->mt->region;
   }

   /*
    * Update depth and stencil test state
    */
   ctx->Driver.Enable(ctx, GL_DEPTH_TEST, ctx->Depth.Test);
   ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);

   i915_update_color_write_enable(i915, colorRegion != NULL);

   intel->vtbl.set_draw_region(intel, &colorRegion, depthRegion,
                               fb->_NumColorDrawBuffers);
   intel->NewGLState |= _NEW_BUFFERS;

   /* Set state we know depends on drawable parameters:
    */
   intelCalcViewport(ctx);
   ctx->Driver.Scissor(ctx);

   /* Update culling direction, which changes depending on the
    * orientation of the buffer:
    */
   ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
}

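/* Illustrative note (assuming the FALLBACK() helper from intel_context.h):
 * FALLBACK(intel, bit, cond) latches or clears one software-fallback bit,
 * and while any bit is set, rendering is routed through swrast instead of
 * the hardware path.  For example, binding a 4096x4096 FBO on a chip whose
 * MaxRenderbufferSize is 2048 sets INTEL_FALLBACK_DRAW_BUFFER above, and
 * binding a smaller FBO later clears it again.
 */
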
static void
i915_new_batch(struct intel_context *intel)
{
   struct i915_context *i915 = i915_context(&intel->ctx);

   /* Mark all state as needing to be emitted when starting a new batchbuffer.
    * Using hardware contexts would be an alternative, but they have some
    * difficulties associated with them (physical address requirements).
    */
   i915->state.emitted = 0;
   i915->last_draw_offset = 0;
   i915->last_sampler = 0;

   i915->current_vb_bo = NULL;
   i915->current_vertex_size = 0;
}

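/* Consequence worth noting (descriptive, no extra code): with "emitted"
 * zeroed, the next get_dirty() call reports every active state group, so
 * the first i915_emit_state() in the new batch re-uploads the complete
 * hardware state.
 */
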
static void
i915_assert_not_dirty( struct intel_context *intel )
{
   struct i915_context *i915 = i915_context(&intel->ctx);
   GLuint dirty = get_dirty(&i915->state);
   assert(!dirty);
   (void) dirty;
}

static void
i915_invalidate_state(struct intel_context *intel, GLuint new_state)
{
   struct gl_context *ctx = &intel->ctx;

   _swsetup_InvalidateState(ctx, new_state);
   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);
}

void
i915InitVtbl(struct i915_context *i915)
{
   i915->intel.vtbl.check_vertex_size = i915_check_vertex_size;
   i915->intel.vtbl.destroy = i915_destroy_context;
   i915->intel.vtbl.emit_state = i915_emit_state;
   i915->intel.vtbl.new_batch = i915_new_batch;
   i915->intel.vtbl.reduced_primitive_state = i915_reduced_primitive_state;
   i915->intel.vtbl.render_start = i915_render_start;
   i915->intel.vtbl.render_prevalidate = i915_render_prevalidate;
   i915->intel.vtbl.set_draw_region = i915_set_draw_region;
   i915->intel.vtbl.update_draw_buffer = i915_update_draw_buffer;
   i915->intel.vtbl.update_texture_state = i915UpdateTextureState;
   i915->intel.vtbl.assert_not_dirty = i915_assert_not_dirty;
   i915->intel.vtbl.finish_batch = intel_finish_vb;
   i915->intel.vtbl.invalidate_state = i915_invalidate_state;
   i915->intel.vtbl.render_target_supported = i915_render_target_supported;
}
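
/* Usage sketch (illustrative): generation-independent code in the shared
 * intel_context layer dispatches through this vtbl rather than calling the
 * i915 functions directly, e.g.:
 *
 *    intel->vtbl.emit_state(intel);
 *    if (!intel->vtbl.check_vertex_size(intel, vertex_size))
 *       ... fall back or fix up the vertex layout ...
 */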