brw_draw.c revision 1b2baf3b08d545c772e9636fb0a0614c489c3916
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/errno.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS

static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON
};


static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};
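
/* Both tables above are indexed directly by the GL primitive mode.  This
 * works because the classic GL primitive tokens are the consecutive
 * integers 0 (GL_POINTS) through 9 (GL_POLYGON), so GL_POLYGON+1 entries
 * cover every mode.
 */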

/* When the primitive changes, set a state bit and re-validate.  This
 * isn't the nicest solution; ideally all the programs would be immune
 * to the active primitive (i.e. cope with all possibilities), but that
 * may not be realistic.
 */
static void brw_set_prim(struct brw_context *brw,
                         const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->intel.ctx;
   uint32_t hw_prim = prim_to_hw_prim[prim->mode];

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[prim->mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}

static void gen6_set_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim)
{
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   if (brw->hiz.op) {
      assert(prim->mode == GL_TRIANGLES);
      hw_prim = _3DPRIM_RECTLIST;
   } else {
      hw_prim = prim_to_hw_prim[prim->mode];
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
   }
}

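
/* Feed the hardware only complete primitives: a GL_QUADS count is rounded
 * down to a multiple of four, and a GL_QUAD_STRIP needs at least four
 * vertices and an even count.  Trailing vertices that cannot form a
 * complete primitive are dropped, matching GL's rule that incomplete
 * primitives are ignored.
 */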
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}


static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
             hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
             vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(1); // instance count
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   intel->batch.need_workaround_flush = true;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}

static void gen7_emit_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim,
                           uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(7);
   OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
   OUT_BATCH(hw_prim | vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(1); // instance count
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}

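
/* Record the gl_client_array for each vertex attribute and pack each
 * enabled attribute's component count into brw->vb.info.sizes, two bits
 * per attribute.  A change in any attribute's size means the vertex
 * element setup must be rebuilt, signalled by BRW_NEW_INPUT_DIMENSIONS.
 */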
static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

      if (arrays[i]->StrideB != 0)
         brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
            ((i%16) * 2);
   }

   /* Raise statechanges if input sizes have changed. */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}

/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer and resolve the depth buffer of each
 * enabled depth texture.
 *
 * (In the future, this will also perform MSAA resolves).
 */
static void
brw_predraw_resolve_buffers(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct intel_renderbuffer *depth_irb;
   struct intel_texture_object *tex_obj;
   bool did_resolve = false;

   /* Avoid recursive HiZ op. */
   if (brw->hiz.op) {
      return;
   }

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb && depth_irb->mt) {
      did_resolve |= intel_renderbuffer_resolve_hiz(intel, depth_irb);
   }

   /* Resolve depth buffer of each enabled depth texture. */
   for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      if (!ctx->Texture.Unit[i]._ReallyEnabled)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      did_resolve |= intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
   }

   if (did_resolve) {
      /* Call vbo_bind_array() to synchronize the vbo module's vertex
       * attributes to the gl_context's.
       *
       * Details
       * -------
       * The vbo module tracks vertex attributes separately from the
       * gl_context.  Specifically, the vbo module maintains vertex
       * attributes in vbo_exec_context::array::inputs, which is
       * synchronized with gl_context::Array::ArrayObj::VertexAttrib by
       * vbo_bind_array().  vbo_draw_arrays() calls vbo_bind_array() to
       * perform the synchronization before calling the real draw call,
       * vbo_context::draw_arrays.
       *
       * At this point (after performing a resolve meta-op but before
       * calling vbo_bind_array), the gl_context's vertex attributes have
       * been restored to their original state (that is, their state
       * before the meta-op began), but the vbo module's vertex attributes
       * are those used in the last meta-op.  Therefore we must manually
       * synchronize the two with vbo_bind_array() before continuing with
       * the original draw command.
       */
      _mesa_update_state(ctx);
      vbo_bind_arrays(ctx);
      _mesa_update_state(ctx);
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving.
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * (In the future, this will also mark needed MSAA resolves).
 */
static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);

   if (depth_irb &&
       ctx->Depth.Mask &&
       !brw->hiz.op) {
      intel_renderbuffer_set_needs_depth_resolve(depth_irb);
   }
}

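/* Hardware draw path.  brw_try_draw_prims() below emits state and
 * primitives into the batchbuffer and returns false on a fallback or
 * out-of-memory condition; brw_draw_prims() then hands the draw to the
 * software tnl/swrast path.
 */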
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static bool brw_try_draw_prims( struct gl_context *ctx,
                                const struct gl_client_array *arrays[],
                                const struct _mesa_prim *prim,
                                GLuint nr_prims,
                                const struct _mesa_index_buffer *ib,
                                GLuint min_index,
                                GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   bool retval = true;
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   /* Resolves must occur after updating state and finalizing textures but
    * before setting up any hardware state for this draw call.
    */
   brw_predraw_resolve_buffers(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
                                  (sizeof(struct brw_sampler_state) +
                                   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
      intel_batchbuffer_save_state(intel);

      if (intel->gen < 6)
         brw_set_prim(brw, &prim[i]);
      else
         gen6_set_prim(brw, &prim[i]);
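
      /* Emitting a single primitive can still fail if the batch does not
       * fit in the GPU aperture.  On the first failure the batch is reset
       * to the state saved above, flushed, and the primitive retried in a
       * fresh batch; if even that fails, the primitive is dropped with a
       * one-time warning.
       */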
retry:
      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
       * that the state updated in the loop outside of this block is that in
       * *_set_prim or intel_batchbuffer_flush(), which only impacts
       * brw->state.dirty.brw.
       */
      if (brw->state.dirty.brw) {
         intel->no_batch_wrap = true;
         brw_upload_state(brw);

         if (unlikely(brw->intel.Fallback)) {
            intel->no_batch_wrap = false;
            retval = false;
            goto out;
         }
      }

      if (intel->gen >= 7)
         gen7_emit_prim(brw, &prim[i], brw->primitive);
      else
         brw_emit_prim(brw, &prim[i], brw->primitive);

      intel->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
         if (!fail_next) {
            intel_batchbuffer_reset_to_saved(intel);
            intel_batchbuffer_flush(intel);
            fail_next = true;
            goto retry;
         } else {
            if (intel_batchbuffer_flush(intel) == -ENOSPC) {
               static bool warned = false;

               if (!warned) {
                  fprintf(stderr, "i965: Single primitive emit exceeded "
                          "available aperture space\n");
                  warned = true;
               }

               retval = false;
            }
         }
      }
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);
 out:

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return retval;
}

void brw_draw_prims( struct gl_context *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index )
{
   bool retval;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);

      /* Decide if we want to rebase.  If so we end up recursing once
       * only into this function.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims );
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

   /* Otherwise, we really are out of memory.  Pass the drawing
    * command to the software tnl module, which will in turn call
    * swrast to do the drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }

}

void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   int i;

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}

void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}