brw_draw_upload.c revision 04a11b5f5e22155e5816e2da560b485eb0eaaec9
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#undef NDEBUG

#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
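/* The tables below map a vertex array's component count to a hardware
 * surface format, one table per GL component type and conversion mode
 * (direct/integer, normalized, scaled).  Each is indexed by the array's
 * Size (1..4); entry 0 is unused.  Where a table repeats its four-component
 * format in the three-component slot (the half-float and the 8/16-bit
 * integer "direct" tables), that is presumably because the hardware offers
 * no three-component version of that format.
 */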
static GLuint double_types[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};

static GLuint half_float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R16_FLOAT,
   BRW_SURFACEFORMAT_R16G16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};

static GLuint uint_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UINT,
   BRW_SURFACEFORMAT_R32G32_UINT,
   BRW_SURFACEFORMAT_R32G32B32_UINT,
   BRW_SURFACEFORMAT_R32G32B32A32_UINT
};

static GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static GLuint int_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SINT,
   BRW_SURFACEFORMAT_R32G32_SINT,
   BRW_SURFACEFORMAT_R32G32B32_SINT,
   BRW_SURFACEFORMAT_R32G32B32A32_SINT
};

static GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static GLuint ushort_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UINT,
   BRW_SURFACEFORMAT_R16G16_UINT,
   BRW_SURFACEFORMAT_R16G16B16A16_UINT,
   BRW_SURFACEFORMAT_R16G16B16A16_UINT
};

static GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static GLuint short_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SINT,
   BRW_SURFACEFORMAT_R16G16_SINT,
   BRW_SURFACEFORMAT_R16G16B16A16_SINT,
   BRW_SURFACEFORMAT_R16G16B16A16_SINT
};

static GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static GLuint ubyte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UINT,
   BRW_SURFACEFORMAT_R8G8_UINT,
   BRW_SURFACEFORMAT_R8G8B8A8_UINT,
   BRW_SURFACEFORMAT_R8G8B8A8_UINT
};

static GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static GLuint byte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SINT,
   BRW_SURFACEFORMAT_R8G8_SINT,
   BRW_SURFACEFORMAT_R8G8B8A8_SINT,
   BRW_SURFACEFORMAT_R8G8B8A8_SINT
};

static GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};
/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
static GLuint get_surface_type( GLenum type, GLuint size,
                                GLenum format, bool normalized, bool integer )
{
   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      printf("type %s size %d normalized %d\n",
             _mesa_lookup_enum_by_nr(type), size, normalized);

   if (integer) {
      assert(format == GL_RGBA); /* sanity check */
      switch (type) {
      case GL_INT: return int_types_direct[size];
      case GL_SHORT: return short_types_direct[size];
      case GL_BYTE: return byte_types_direct[size];
      case GL_UNSIGNED_INT: return uint_types_direct[size];
      case GL_UNSIGNED_SHORT: return ushort_types_direct[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_direct[size];
      default: assert(0); return 0;
      }
   } else if (normalized) {
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            assert(size == 4);
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      default: assert(0); return 0;
      }
   }
   else {
      assert(format == GL_RGBA); /* sanity check */
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      /* This produces GL_FIXED inputs as values between INT32_MIN and
       * INT32_MAX, which will be scaled down by 1/65536 by the VS.
       */
      case GL_FIXED: return int_types_scale[size];
      default: assert(0); return 0;
      }
   }
}
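/* For example, a glVertexAttribPointer(attr, 3, GL_FLOAT, GL_FALSE, ...)
 * array maps to BRW_SURFACEFORMAT_R32G32B32_FLOAT, while a four-component
 * normalized GL_UNSIGNED_BYTE color array submitted as GL_BGRA maps to
 * BRW_SURFACEFORMAT_B8G8R8A8_UNORM.
 */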
static GLuint get_size( GLenum type )
{
   switch (type) {
   case GL_DOUBLE: return sizeof(GLdouble);
   case GL_FLOAT: return sizeof(GLfloat);
   case GL_HALF_FLOAT: return sizeof(GLhalfARB);
   case GL_INT: return sizeof(GLint);
   case GL_SHORT: return sizeof(GLshort);
   case GL_BYTE: return sizeof(GLbyte);
   case GL_UNSIGNED_INT: return sizeof(GLuint);
   case GL_UNSIGNED_SHORT: return sizeof(GLushort);
   case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
   case GL_FIXED: return sizeof(GLuint);
   default: assert(0); return 0;
   }
}

static GLuint get_index_type(GLenum type)
{
   switch (type) {
   case GL_UNSIGNED_BYTE: return BRW_INDEX_BYTE;
   case GL_UNSIGNED_SHORT: return BRW_INDEX_WORD;
   case GL_UNSIGNED_INT: return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}

static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   if (min == -1) {
      /* If we don't have computed min/max bounds, then this must be a use of
       * the current attribute, which has a 0 stride. Otherwise, we wouldn't
       * know what data to upload.
       */
      assert(element->glarray->StrideB == 0);

      intel_upload_data(&brw->intel, element->glarray->Ptr,
                        element->element_size,
                        element->element_size,
                        &buffer->bo, &buffer->offset);

      buffer->stride = 0;
      return;
   }

   int src_stride = element->glarray->StrideB;
   const unsigned char *src = element->glarray->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;

   if (dst_stride == src_stride) {
      intel_upload_data(&brw->intel, src, size, dst_stride,
                        &buffer->bo, &buffer->offset);
   } else {
      char * const map = intel_upload_map(&brw->intel, size, dst_stride);
      char *dst = map;

      while (count--) {
         memcpy(dst, src, dst_stride);
         src += src_stride;
         dst += dst_stride;
      }
      intel_upload_unmap(&brw->intel, map, size, dst_stride,
                         &buffer->bo, &buffer->offset);
   }
   buffer->stride = dst_stride;
}
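/* Only the used range of elements is copied: with min = 100 and max = 130,
 * for instance, the 31 elements starting at Ptr + 100 * src_stride are
 * uploaded, and the caller compensates via start_vertex_bias or by biasing
 * the buffer offset so that vertex 100 still fetches the first uploaded
 * element.
 */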
static void brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   /* CACHE_NEW_VS_PROG */
   GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0, total_size = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;
   int delta, i, j;
   GLboolean can_merge_uploads = GL_TRUE;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* _NEW_POLYGON
    *
    * On gen6+, edge flags don't end up in the VUE (either in or out of the
    * VS). Instead, they're uploaded as the last vertex element, and the data
    * is passed sideband through the fixed function units. So, we need to
    * prepare the vertex buffer for it, but it's not present in inputs_read.
    */
   if (intel->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
                           ctx->Polygon.BackMode != GL_FILL)) {
      vs_inputs |= VERT_BIT_EDGEFLAG;
   }

   /* First build an array of pointers to ve's in vb.inputs_read */
   if (0)
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = ffsll(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~BITFIELD64_BIT(i);
      if (input->glarray->Size && get_size(input->glarray->Type))
         brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      goto prepare;
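   /* Walk the enabled arrays.  Arrays backed by a buffer object are
    * referenced in place, sharing a brw_vertex_buffer slot with any earlier
    * array that uses the same buffer, stride and instance divisor;
    * client-memory arrays are queued in upload[] until we know whether they
    * can be uploaded as a single interleaved buffer.
    */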
   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;
      int type_size = get_size(glarray->Type);

      input->element_size = type_size * glarray->Size;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);
         int k;

         for (k = 0; k < i; k++) {
            const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                glarray->InstanceDivisor == other->InstanceDivisor &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->bo = intel_bufferobj_source(intel,
                                                intel_buffer, type_size,
                                                &buffer->offset);
            drm_intel_bo_reference(buffer->bo);
            buffer->offset += (uintptr_t)glarray->Ptr;
            buffer->stride = glarray->StrideB;
            buffer->step_rate = glarray->InstanceDivisor;

            input->buffer = j++;
            input->offset = 0;
         }

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset. If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * GTT. We would hope that the VBO's max_index would save us, but
          * Mesa appears to hand us min/max values not clipped to the
          * array object's _MaxElement, and _MaxElement frequently appears
          * to be wrong anyway.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            /* Position array not properly enabled: */
            if (input->attrib == VERT_ATTRIB_POS && glarray->StrideB == 0) {
               intel->Fallback = true; /* boolean, not bitfield */
               return;
            }

            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  (uintptr_t)(glarray->Ptr - ptr) > interleaved)
         {
            interleaved = 0;
         }
         else if ((uintptr_t)(glarray->Ptr - ptr) & (type_size - 1))
         {
            /* enforce natural alignment (for doubles) */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;

         total_size = ALIGN(total_size, type_size);
         total_size += input->element_size;

         if (glarray->InstanceDivisor != 0) {
            can_merge_uploads = GL_FALSE;
         }
      }
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }
   if (delta && !brw->intel.intelScreen->relaxed_relocations)
      min_index = delta = 0;
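   /* Worked example: for a draw with min_index = 100 and max_index = 130
    * where every enabled array is uploaded, only elements [100,130] are
    * copied and start_vertex_bias becomes -100, so the vertex indices in
    * the 3DPRIMITIVE still fetch the right data.  If some arrays stay in
    * buffer objects, delta remains min_index and is instead subtracted from
    * each uploaded buffer's offset below; that path is only taken when the
    * screen reports relaxed_relocations.
    */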
   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved && interleaved <= 2 * total_size) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved. First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
      else if ((total_size < 2048) && can_merge_uploads) {
         /* Upload non-interleaved arrays into a single interleaved array */
         struct brw_vertex_buffer *buffer;
         int count = MAX2(max_index - min_index + 1, 1);
         int offset;
         char *map;

         map = intel_upload_map(&brw->intel, total_size * count, total_size);
         for (i = offset = 0; i < nr_uploads; i++) {
            const unsigned char *src = upload[i]->glarray->Ptr;
            int size = upload[i]->element_size;
            int stride = upload[i]->glarray->StrideB;
            char *dst;
            int n;

            offset = ALIGN(offset, get_size(upload[i]->glarray->Type));
            dst = map + offset;
            src += min_index * stride;

            for (n = 0; n < count; n++) {
               memcpy(dst, src, size);
               src += stride;
               dst += total_size;
            }

            upload[i]->offset = offset;
            upload[i]->buffer = j;

            offset += size;
         }
         assert(offset == total_size);
         buffer = &brw->vb.buffers[j++];
         intel_upload_unmap(&brw->intel, map, offset * count, offset,
                            &buffer->bo, &buffer->offset);
         buffer->stride = offset;
         buffer->step_rate = 0;
         buffer->offset -= delta * offset;

         nr_uploads = 0;
      }
   }
   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      if (upload[i]->glarray->InstanceDivisor == 0) {
         copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                                 buffer, upload[i]->element_size);
      } else {
         /* This is an instanced attribute, since its InstanceDivisor
          * is not zero. Therefore, its data will be stepped after the
          * instanced draw has been run InstanceDivisor times.
          */
         uint32_t instanced_attr_max_index =
            (brw->num_instances - 1) / upload[i]->glarray->InstanceDivisor;
         copy_array_to_vbo_array(brw, upload[i], 0, instanced_attr_max_index,
                                 buffer, upload[i]->element_size);
      }
      buffer->offset -= delta * buffer->stride;
      buffer->step_rate = upload[i]->glarray->InstanceDivisor;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }
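   /* A stream of draws from steadily growing user arrays tends to reproduce
    * the previous draw's buffer objects, strides and step rates, with every
    * offset advanced by the same whole number of vertices.  The check below
    * detects that case, drops the new references and folds the difference
    * into start_vertex_bias so the vertex buffer state already in the batch
    * can be reused.
    */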
   /* Can we simply extend the current vb? */
   if (j == brw->vb.nr_current_buffers) {
      int delta = 0;
      for (i = 0; i < j; i++) {
         int d;

         if (brw->vb.current_buffers[i].handle != brw->vb.buffers[i].bo->handle ||
             brw->vb.current_buffers[i].stride != brw->vb.buffers[i].stride ||
             brw->vb.current_buffers[i].step_rate != brw->vb.buffers[i].step_rate)
            break;

         d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
         if (d < 0)
            break;
         if (i == 0)
            delta = d / brw->vb.current_buffers[i].stride;
         if (delta * brw->vb.current_buffers[i].stride != d)
            break;
      }

      if (i == j) {
         brw->vb.start_vertex_bias += delta;
         while (--j >= 0)
            drm_intel_bo_unreference(brw->vb.buffers[j].bo);
         j = 0;
      }
   }

   brw->vb.nr_buffers = j;

prepare:
   brw_prepare_query_begin(brw);
}
static void brw_emit_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint i, nr_elements;

   brw_prepare_vertices(brw);

   brw_emit_query_begin(brw);

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but the buffers don't do anything
    * unless a VE loads from them.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
      if (intel->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      CACHED_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets. */

   if (brw->vb.nr_buffers) {
      if (intel->gen >= 6) {
         assert(brw->vb.nr_buffers <= 33);
      } else {
         assert(brw->vb.nr_buffers <= 17);
      }

      BEGIN_BATCH(1 + 4 * brw->vb.nr_buffers);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4 * brw->vb.nr_buffers - 1));
      for (i = 0; i < brw->vb.nr_buffers; i++) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         uint32_t dw0;

         if (intel->gen >= 6) {
            dw0 = buffer->step_rate
                     ? GEN6_VB0_ACCESS_INSTANCEDATA
                     : GEN6_VB0_ACCESS_VERTEXDATA;
            dw0 |= i << GEN6_VB0_INDEX_SHIFT;
         } else {
            dw0 = buffer->step_rate
                     ? BRW_VB0_ACCESS_INSTANCEDATA
                     : BRW_VB0_ACCESS_VERTEXDATA;
            dw0 |= i << BRW_VB0_INDEX_SHIFT;
         }

         if (intel->gen >= 7)
            dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

         OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
         OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
         if (intel->gen >= 5) {
            OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
         } else
            OUT_BATCH(0);
         OUT_BATCH(buffer->step_rate);

         brw->vb.current_buffers[i].handle = buffer->bo->handle;
         brw->vb.current_buffers[i].offset = buffer->offset;
         brw->vb.current_buffers[i].stride = buffer->stride;
         brw->vb.current_buffers[i].step_rate = buffer->step_rate;
      }
      brw->vb.nr_current_buffers = i;
      ADVANCE_BATCH();
   }

   nr_elements = brw->vb.nr_enabled + brw->vs.prog_data->uses_vertexid;

   /* The hardware allows one more VERTEX_ELEMENT than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
   if (intel->gen >= 6) {
      assert(nr_elements <= 34);
   } else {
      assert(nr_elements <= 18);
   }
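   /* A concrete example of the VE encoding emitted below: a three-component
    * float attribute in buffer 0 at offset 0 gets VE0 = (index 0 | VALID |
    * R32G32B32_FLOAT << FORMAT_SHIFT | src offset 0), and VE1 stores SRC
    * for x/y/z with STORE_1_FLT in w, so the shader reads w as 1.0.
    */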
   struct brw_vertex_element *gen6_edgeflag_input = NULL;

   BEGIN_BATCH(1 + nr_elements * 2);
   OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized,
                                         input->glarray->Integer);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      /* The gen4 driver expects edgeflag to come in as a float, and passes
       * that float on to the tests in the clipper. Mesa's current vertex
       * attribute value for EdgeFlag is stored as a float, which works out.
       * glEdgeFlagPointer, on the other hand, gives us an unnormalized
       * integer ubyte. Just rewrite that to convert to a float.
       */
      if (input->attrib == VERT_ATTRIB_EDGEFLAG) {
         /* Gen6+ passes edgeflag as sideband along with the vertex, instead
          * of in the VUE. We have to upload it sideband as the last vertex
          * element according to the B-Spec.
          */
         if (intel->gen >= 6) {
            gen6_edgeflag_input = input;
            continue;
         }

         if (format == BRW_SURFACEFORMAT_R8_UINT)
            format = BRW_SURFACEFORMAT_R8_SSCALED;
      }

      /* The cases fall through deliberately: a size-N array stores SRC for
       * the first N components and constants for the rest.
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0; /* fallthrough */
      case 3: comp3 = input->glarray->Integer ? BRW_VE1_COMPONENT_STORE_1_INT
                                              : BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      if (intel->gen >= 6) {
         OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (intel->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }

   if (intel->gen >= 6 && gen6_edgeflag_input) {
      uint32_t format = get_surface_type(gen6_edgeflag_input->glarray->Type,
                                         gen6_edgeflag_input->glarray->Size,
                                         gen6_edgeflag_input->glarray->Format,
                                         gen6_edgeflag_input->glarray->Normalized,
                                         gen6_edgeflag_input->glarray->Integer);

      OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
                GEN6_VE0_VALID |
                GEN6_VE0_EDGE_FLAG_ENABLE |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (gen6_edgeflag_input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
   }
   if (brw->vs.prog_data->uses_vertexid) {
      uint32_t dw0 = 0, dw1 = 0;

      dw1 = ((BRW_VE1_COMPONENT_STORE_VID << BRW_VE1_COMPONENT_0_SHIFT) |
             (BRW_VE1_COMPONENT_STORE_IID << BRW_VE1_COMPONENT_1_SHIFT) |
             (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
             (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));

      if (intel->gen >= 6) {
         dw0 |= GEN6_VE0_VALID;
      } else {
         dw0 |= BRW_VE0_VALID;
         dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
      }

      /* Note that for gl_VertexID, gl_InstanceID, and gl_PrimitiveID values,
       * the format is ignored and the value is always int.
       */

      OUT_BATCH(dw0);
      OUT_BATCH(dw1);
   }

   CACHED_BATCH();
}

const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = _NEW_POLYGON,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = CACHE_NEW_VS_PROG,
   },
   .emit = brw_emit_vertices,
};
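/* Index buffers: a user-space index array is copied into the upload buffer,
 * while an index buffer object is referenced in place when it is suitably
 * aligned.  For example, a GL_UNSIGNED_INT index buffer bound at byte
 * offset 6 is not 4-byte aligned and gets rebased through a temporary; an
 * aligned one simply has its offset folded into start_vertex_offset, in
 * units of indices.
 */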
static void brw_upload_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO: */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset: */
      intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
                        &bo, &offset);
      brw->ib.start_vertex_offset = offset / ib_type_size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.  We only read from the mapping, so map
       * for reading.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBufferRange(ctx,
                                                   offset,
                                                   ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj);

         intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
                           &bo, &offset);
         brw->ib.start_vertex_offset = offset / ib_type_size;

         ctx->Driver.UnmapBuffer(ctx, bufferobj);
      } else {
         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;

         bo = intel_bufferobj_source(intel,
                                     intel_buffer_object(bufferobj),
                                     ib_type_size,
                                     &offset);
         drm_intel_bo_reference(bo);

         brw->ib.start_vertex_offset += offset / ib_type_size;
      }
   }

   if (brw->ib.bo != bo) {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;

      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}

const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .emit = brw_upload_indices,
};

static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint cut_index_setting;

   if (index_buffer == NULL)
      return;

   if (brw->prim_restart.enable_cut_index) {
      cut_index_setting = BRW_CUT_INDEX_ENABLE;
   } else {
      cut_index_setting = 0;
   }

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             cut_index_setting |
             get_index_type(index_buffer->type) << 8 |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.bo->size - 1);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};