u_vbuf.c revision 2b851526c1c047bba7ebb7e51706b1694f027947
/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_vbuf.h"

#include "util/u_dump.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"

struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
   /* Per-element flags. */
   boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];
};

enum {
   VB_VERTEX = 0,
   VB_INSTANCE = 1,
   VB_CONST = 2,
   VB_NUM = 3
};

struct u_vbuf_priv {
   struct u_vbuf b;
   struct pipe_context *pipe;
   struct translate_cache *translate_cache;

   /* Vertex element state bound by the state tracker. */
   void *saved_ve;
   /* and its associated helper structure for this module. */
   struct u_vbuf_elements *ve;

   /* Vertex elements used for the translate fallback. */
   struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
   /* If non-NULL, this is a vertex element state used for the translate
    * fallback and therefore used for rendering too. */
   void *fallback_ve;
   /* The vertex buffer slot indices where translated vertices are stored. */
   unsigned fallback_vbs[VB_NUM];
   /* When binding the fallback vertex element state, we don't want to
    * change saved_ve and ve. This is set to TRUE in such cases. */
   boolean ve_binding_lock;

   /* Whether there is any user buffer. */
   boolean any_user_vbs;
   /* Whether there is a buffer with a non-native layout. */
   boolean incompatible_vb_layout;
   /* Per-buffer flags. */
   boolean incompatible_vb[PIPE_MAX_ATTRIBS];
};
static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->b.caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}

struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
              unsigned upload_buffer_size,
              unsigned upload_buffer_alignment,
              unsigned upload_buffer_bind,
              enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv);

   mgr->pipe = pipe;
   mgr->translate_cache = translate_cache_create();
   memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->b.caps.fetch_dword_unaligned =
      fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_init_format_caps(mgr);

   return &mgr->b;
}

void u_vbuf_destroy(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}
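/* Illustrative usage sketch, not part of the original file: how a driver
 * might instantiate the manager when creating its context.  The driver-side
 * names (struct drv_context, dctx->pipe, dctx->vbuf) are hypothetical
 * assumptions; the u_vbuf_create() signature and U_VERTEX_FETCH_BYTE_ALIGNED
 * are the ones used in this module. */
#if 0
static void drv_init_vbuf(struct drv_context *dctx)
{
   /* 1 MB upload buffer, 16-byte alignment, usable as a vertex buffer,
    * and the hardware can fetch vertices from any byte offset. */
   dctx->vbuf = u_vbuf_create(&dctx->pipe,
                              1024 * 1024, 16,
                              PIPE_BIND_VERTEX_BUFFER,
                              U_VERTEX_FETCH_BYTE_ALIGNED);
}
#endif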
static void
u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
                         unsigned vb_mask, unsigned out_vb,
                         int start_vertex, unsigned num_vertices)
{
   struct translate *tr;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   uint8_t *out_map;
   unsigned i, out_offset;

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_mask & (1 << i)) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];
         unsigned offset = vb->buffer_offset + vb->stride * start_vertex;
         uint8_t *map;

         if (u_vbuf_resource(vb->buffer)->user_ptr) {
            map = u_vbuf_resource(vb->buffer)->user_ptr + offset;
         } else {
            unsigned size = vb->stride ? num_vertices * vb->stride
                                       : sizeof(double)*4;

            if (offset+size > vb->buffer->width0) {
               size = vb->buffer->width0 - offset;
            }

            map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size,
                                        PIPE_TRANSFER_READ, &vb_transfer[i]);
         }

         tr->set_buffer(tr, i, map, vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   u_upload_alloc(mgr->b.uploader,
                  key->output_stride * start_vertex,
                  key->output_stride * num_vertices,
                  &out_offset, &out_buffer,
                  (void**)&out_map);

   out_offset -= key->output_stride * start_vertex;

   /* Translate. */
   tr->run(tr, 0, num_vertices, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_transfer[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer. */
   mgr->b.real_vertex_buffer[out_vb].buffer_offset = out_offset;
   mgr->b.real_vertex_buffer[out_vb].stride = key->output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(
      &mgr->b.real_vertex_buffer[out_vb].buffer, NULL);
   mgr->b.real_vertex_buffer[out_vb].buffer = out_buffer;
}

static boolean
u_vbuf_translate_find_free_vb_slots(struct u_vbuf_priv *mgr,
                                    unsigned mask[VB_NUM])
{
   unsigned i, type;
   unsigned nr = mgr->ve->count;
   boolean used_vb[PIPE_MAX_ATTRIBS] = {0};
   unsigned fallback_vbs[VB_NUM];

   memset(fallback_vbs, ~0, sizeof(fallback_vbs));

   /* Mark used vertex buffers as... used. */
   for (i = 0; i < nr; i++) {
      if (!mgr->ve->incompatible_layout_elem[i]) {
         unsigned index = mgr->ve->ve[i].vertex_buffer_index;

         if (!mgr->incompatible_vb[index]) {
            used_vb[index] = TRUE;
         }
      }
   }

   /* Find free slots for each type if needed. */
   i = 0;
   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         for (; i < PIPE_MAX_ATTRIBS; i++) {
            if (!used_vb[i]) {
               /*printf("found slot=%i for type=%i\n", i, type);*/
               fallback_vbs[type] = i;
               i++;
               if (i > mgr->b.nr_real_vertex_buffers) {
                  mgr->b.nr_real_vertex_buffers = i;
               }
               break;
            }
         }
         if (i == PIPE_MAX_ATTRIBS) {
            /* fail, reset the number to its original value */
            mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
            return FALSE;
         }
      }
   }

   memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs));
   return TRUE;
}

static boolean
u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
                       int start_vertex, unsigned num_vertices,
                       int start_instance, unsigned num_instances)
{
   unsigned mask[VB_NUM] = {0};
   struct translate_key key[VB_NUM];
   unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... into key.elements */
   unsigned i, type;

   int start[VB_NUM] = {
      start_vertex,     /* VERTEX */
      start_instance,   /* INSTANCE */
      0                 /* CONST */
   };

   unsigned num[VB_NUM] = {
      num_vertices,     /* VERTEX */
      num_instances,    /* INSTANCE */
      1                 /* CONST */
   };

   memset(key, 0, sizeof(key));
   memset(elem_index, ~0, sizeof(elem_index));

   /* See if there are vertex attribs of each type to translate and
    * which ones. */
   for (i = 0; i < mgr->ve->count; i++) {
      unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;

      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[vb_index]) {
         continue;
      }

      if (!mgr->b.vertex_buffer[vb_index].stride) {
         mask[VB_CONST] |= 1 << vb_index;
      } else if (mgr->ve->ve[i].instance_divisor) {
         mask[VB_INSTANCE] |= 1 << vb_index;
      } else {
         mask[VB_VERTEX] |= 1 << vb_index;
      }
   }

   assert(mask[VB_VERTEX] || mask[VB_INSTANCE] || mask[VB_CONST]);

   /* Find free vertex buffer slots. */
   if (!u_vbuf_translate_find_free_vb_slots(mgr, mask)) {
      return FALSE;
   }

   /* Initialize the translate keys. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct translate_key *k;
      struct translate_element *te;
      unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;

      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[vb_index]) {
         continue;
      }

      /* Set type to what we will translate.
       * Whether vertex, instance, or constant attribs. */
      bit = 1 << vb_index;
      for (type = 0; type < VB_NUM; type++) {
         if (mask[type] & bit) {
            break;
         }
      }
      assert(type < VB_NUM);
      assert(translate_is_output_format_supported(mgr->ve->native_format[i]));
      /*printf("velem=%i type=%i\n", i, type);*/

      /* Add the vertex element. */
      k = &key[type];
      elem_index[type][i] = k->nr_elements;

      te = &k->element[k->nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = vb_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = mgr->ve->native_format[i];
      te->output_offset = k->output_stride;

      k->output_stride += mgr->ve->native_format_size[i];
      k->nr_elements++;
   }

   /* Translate buffers. */
   for (type = 0; type < VB_NUM; type++) {
      if (key[type].nr_elements) {
         u_vbuf_translate_buffers(mgr, &key[type], mask[type],
                                  mgr->fallback_vbs[type],
                                  start[type], num[type]);

         /* Fixup the stride for constant attribs. */
         if (type == VB_CONST) {
            mgr->b.real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
         }
      }
   }

   /* Setup new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      for (type = 0; type < VB_NUM; type++) {
         if (elem_index[type][i] < key[type].nr_elements) {
            struct translate_element *te = &key[type].element[elem_index[type][i]];
            mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            mgr->fallback_velems[i].src_format = te->output_format;
            mgr->fallback_velems[i].src_offset = te->output_offset;
            mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vbs[type];

            /* elem_index[type][i] can only be set for one type. */
            assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0);
            assert(type > VB_VERTEX || elem_index[type+2][i] == ~0);
            break;
         }
      }
      /* No translating, just copy the original vertex element over. */
      if (type == VB_NUM) {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   mgr->fallback_ve =
      mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                              mgr->fallback_velems);

   /* Preserve saved_ve. */
   mgr->ve_binding_lock = TRUE;
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->ve_binding_lock = FALSE;
   return TRUE;
}

static void u_vbuf_translate_end(struct u_vbuf_priv *mgr)
{
   unsigned i;

   if (mgr->fallback_ve == NULL) {
      return;
   }

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->fallback_ve = NULL;

   /* Unreference the now-unused VBOs. */
   for (i = 0; i < VB_NUM; i++) {
      unsigned vb = mgr->fallback_vbs[i];
      if (vb != ~0) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[vb].buffer, NULL);
         mgr->fallback_vbs[i] = ~0;
      }
   }
   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}

#define FORMAT_REPLACE(what, withwhat) \
    case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break

struct u_vbuf_elements *
u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
                              unsigned count,
                              const struct pipe_vertex_element *attribs,
                              struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);

   ve->count = count;

   if (!count) {
      return ve;
   }

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->b.caps.format_fixed32) {
         switch (format) {
            FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float16) {
         switch (format) {
            FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float64) {
         switch (format) {
            FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_norm32) {
         switch (format) {
            FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_scaled32) {
         switch (format) {
            FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
            default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout_elem[i] =
         ve->ve[i].src_format != ve->native_format[i] ||
         (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
      ve->incompatible_layout =
         ve->incompatible_layout ||
         ve->incompatible_layout_elem[i];
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->b.caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}

void u_vbuf_bind_vertex_elements(struct u_vbuf *mgrb,
                                 void *cso,
                                 struct u_vbuf_elements *ve)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (!cso) {
      return;
   }

   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}

void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr,
                                    struct u_vbuf_elements *ve)
{
   FREE(ve);
}

void u_vbuf_set_vertex_buffers(struct u_vbuf *mgrb,
                               unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;
   memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));

   if (!mgr->b.caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               mgr->incompatible_vb[i] = TRUE;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);

      mgr->b.real_vertex_buffer[i].buffer_offset =
         mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;

      mgr->b.real_vertex_buffer[i].stride =
         mgr->b.vertex_buffer[i].stride = vb->stride;

      if (!vb->buffer ||
          mgr->incompatible_vb[i]) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
   }

   for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}

void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
                             const struct pipe_index_buffer *ib)
{
   if (ib && ib->buffer) {
      assert(ib->offset % ib->index_size == 0);
      pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
      mgr->index_buffer.offset = ib->offset;
      mgr->index_buffer.index_size = ib->index_size;
   } else {
      pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
   }
}

static void
u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
                      int start_vertex, unsigned num_vertices,
                      int start_instance, unsigned num_instances)
{
   unsigned i;
   unsigned nr_velems = mgr->ve->count;
   unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
      unsigned instance_div, first, size;

      /* Skip the buffers generated by translate. */
      if (index == mgr->fallback_vbs[VB_VERTEX] ||
          index == mgr->fallback_vbs[VB_INSTANCE] ||
          index == mgr->fallback_vbs[VB_CONST]) {
         continue;
      }

      assert(vb->buffer);

      if (!u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned count = (num_instances + instance_div - 1) / instance_div;
         first += vb->stride * start_instance;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * start_vertex;
         size = vb->stride * (num_vertices - 1) + mgr->ve->src_format_size[i];
      }

      /* Update offsets. */
      if (!end_offset[index]) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }
   }

   /* Upload buffers. */
   for (i = 0; i < nr_vbufs; i++) {
      unsigned start, end = end_offset[i];
      struct pipe_vertex_buffer *real_vb;
      uint8_t *ptr;

      if (!end) {
         continue;
      }

      start = start_offset[i];
      assert(start < end);

      real_vb = &mgr->b.real_vertex_buffer[i];
      ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;

      u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer);

      real_vb->buffer_offset -= start;
   }
}

unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i, nr = mgr->ve->count;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned result = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
         &mgr->b.real_vertex_buffer[velems[i].vertex_buffer_index];
      unsigned size, max_count, value;

      /* We're not interested in constant and per-instance attribs. */
      if (!vb->buffer ||
          !vb->stride ||
          velems[i].instance_divisor) {
         continue;
      }

      size = vb->buffer->width0;

      /* Subtract buffer_offset. */
      value = vb->buffer_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract src_offset. */
      value = velems[i].src_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract format_size. */
      value = mgr->ve->native_format_size[i];
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Compute the max count. */
      max_count = 1 + size / vb->stride;
      result = MIN2(result, max_count);
   }
   return result;
}

static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs don't need min/max_index. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->b.vertex_buffer[index];

      /* Constant attribs don't need min/max_index. */
      if (!vb->stride) {
         continue;
      }

      /* Per-vertex attribs need min/max_index. */
      if (u_vbuf_resource(vb->buffer)->user_ptr ||
          mgr->ve->incompatible_layout_elem[i] ||
          mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}

static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                                    struct pipe_index_buffer *ib,
                                    const struct pipe_draw_info *info,
                                    int *out_min_index,
                                    int *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;
   unsigned i;
   unsigned restart_index = info->restart_index;

   if (u_vbuf_resource(ib->buffer)->user_ptr) {
      indices = u_vbuf_resource(ib->buffer)->user_ptr +
                ib->offset + info->start * ib->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, ib->buffer,
                                      ib->offset + info->start * ib->index_size,
                                      info->count * ib->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   switch (ib->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max_ui = 0;
      unsigned min_ui = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] != restart_index) {
               if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
            if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
         }
      }
      *out_min_index = min_ui;
      *out_max_index = max_ui;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned max_us = 0;
      unsigned min_us = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] != restart_index) {
               if (us_indices[i] > max_us) max_us = us_indices[i];
               if (us_indices[i] < min_us) min_us = us_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] > max_us) max_us = us_indices[i];
            if (us_indices[i] < min_us) min_us = us_indices[i];
         }
      }
      *out_min_index = min_us;
      *out_max_index = max_us;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned max_ub = 0;
      unsigned min_ub = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] != restart_index) {
               if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
         }
      }
      *out_min_index = min_ub;
      *out_max_index = max_ub;
      break;
   }
   default:
      assert(0);
      *out_min_index = 0;
      *out_max_index = 0;
   }

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}

enum u_vbuf_return_flags
u_vbuf_draw_begin(struct u_vbuf *mgrb,
                  const struct pipe_draw_info *info)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   int start_vertex;
   unsigned num_vertices;

   if (!mgr->incompatible_vb_layout &&
       !mgr->ve->incompatible_layout &&
       !mgr->any_user_vbs) {
      return 0;
   }

   if (info->indexed) {
      int min_index, max_index;
      bool index_bounds_valid = false;

      if (info->max_index != ~0) {
         min_index = info->min_index;
         max_index = info->max_index;
         index_bounds_valid = true;
      } else if (u_vbuf_need_minmax_index(mgr)) {
         u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info,
                                 &min_index, &max_index);
         index_bounds_valid = true;
      }

      /* If the index bounds are valid, it means some upload or translation
       * of per-vertex attribs will be performed. */
      if (index_bounds_valid) {
         assert(min_index <= max_index);

         start_vertex = min_index + info->index_bias;
         num_vertices = max_index + 1 - min_index;
      } else {
         /* Nothing to do for per-vertex attribs. */
         start_vertex = 0;
         num_vertices = 0;
         min_index = 0;
      }
   } else {
      start_vertex = info->start;
      num_vertices = info->count;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) {
      /* XXX check the return value */
      u_vbuf_translate_begin(mgr, start_vertex, num_vertices,
                             info->start_instance, info->instance_count);
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
                            info->start_instance, info->instance_count);
   }

   /*unsigned i;
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      printf("input %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->b.vertex_buffer+i);
      printf("\n");
   }
   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      printf("real %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->b.real_vertex_buffer+i);
      printf("\n");
   }*/

   return U_VBUF_BUFFERS_UPDATED;
}

void u_vbuf_draw_end(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}
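/* Illustrative usage sketch, not part of the original file: how a driver's
 * draw hook might call into this module, assuming a hypothetical driver
 * context "dctx" whose struct u_vbuf *vbuf was created with u_vbuf_create().
 * u_vbuf_draw_begin(), U_VBUF_BUFFERS_UPDATED, real_vertex_buffer and
 * u_vbuf_draw_end() are the names defined in this module; everything
 * driver-side is an assumption. */
#if 0
static void drv_draw_vbo(struct pipe_context *pipe,
                         const struct pipe_draw_info *info)
{
   struct drv_context *dctx = (struct drv_context *)pipe;

   /* Upload user buffers and translate incompatible layouts if needed. */
   if (u_vbuf_draw_begin(dctx->vbuf, info) & U_VBUF_BUFFERS_UPDATED) {
      /* Re-emit vertex buffer state from dctx->vbuf->real_vertex_buffer. */
   }

   /* ... emit the hardware draw command here ... */

   /* Restore the original vertex elements and drop fallback buffers. */
   u_vbuf_draw_end(dctx->vbuf);
}
#endif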