u_vbuf.c revision 2d03d4f4a365d7af5f4dac20700009152eba1682
/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_vbuf.h"

#include "util/u_dump.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"
#include "cso_cache/cso_cache.h"
#include "cso_cache/cso_hash.h"

struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
   /* Per-element flags. */
   boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];
};

enum {
   VB_VERTEX = 0,
   VB_INSTANCE = 1,
   VB_CONST = 2,
   VB_NUM = 3
};

struct u_vbuf_priv {
   struct u_vbuf b;
   struct pipe_context *pipe;
   struct translate_cache *translate_cache;
   struct cso_cache *cso_cache;

   /* Vertex buffers for the driver.
    * There are no user buffers. */
   struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
   int nr_real_vertex_buffers;

   /* The index buffer. */
   struct pipe_index_buffer index_buffer;

   /* Vertex element state bound by the state tracker. */
   void *saved_ve;
   /* and its associated helper structure for this module. */
   struct u_vbuf_elements *ve;

   /* Vertex elements used for the translate fallback. */
   struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
   /* If non-NULL, this is a vertex element state used for the translate
    * fallback and therefore used for rendering too. */
   void *fallback_ve;
   /* The vertex buffer slot index where translated vertices have been
    * stored in. */
   unsigned fallback_vbs[VB_NUM];
   /* When binding the fallback vertex element state, we don't want to
    * change saved_ve and ve. This is set to TRUE in such cases. */
   boolean ve_binding_lock;

   /* Whether there is any user buffer. */
   boolean any_user_vbs;
   /* Whether there is a buffer with a non-native layout. */
   boolean incompatible_vb_layout;
   /* Per-buffer flags. */
   boolean incompatible_vb[PIPE_MAX_ATTRIBS];

   void (*driver_set_index_buffer)(struct pipe_context *pipe,
                                   const struct pipe_index_buffer *);
   void (*driver_set_vertex_buffers)(struct pipe_context *,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *);
};

static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->b.caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}

static void u_vbuf_install(struct u_vbuf_priv *mgr);

struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
              unsigned upload_buffer_size,
              unsigned upload_buffer_alignment,
              unsigned upload_buffer_bind,
              enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv);

   mgr->pipe = pipe;
   mgr->cso_cache = cso_cache_create();
   mgr->translate_cache = translate_cache_create();
   memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->b.caps.fetch_dword_unaligned =
      fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_init_format_caps(mgr);
   u_vbuf_install(mgr);
   return &mgr->b;
}

/* XXX I had to fork this off of cso_context. */
static void *
u_vbuf_pipe_set_vertex_elements(struct u_vbuf_priv *mgr,
                                unsigned count,
                                const struct pipe_vertex_element *states)
{
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   void *handle;
   struct cso_velems_state velems_state;

   /* need to include the count into the stored state data too. */
   key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
   velems_state.count = count;
   memcpy(velems_state.velems, states,
          sizeof(struct pipe_vertex_element) * count);
   hash_key = cso_construct_key((void*)&velems_state, key_size);
   iter = cso_find_state_template(mgr->cso_cache, hash_key, CSO_VELEMENTS,
                                  (void*)&velems_state, key_size);

   if (cso_hash_iter_is_null(iter)) {
      struct cso_velements *cso = MALLOC_STRUCT(cso_velements);
      memcpy(&cso->state, &velems_state, key_size);
      cso->data =
         mgr->pipe->create_vertex_elements_state(mgr->pipe, count,
                                                 &cso->state.velems[0]);
      cso->delete_state =
         (cso_state_callback)mgr->pipe->delete_vertex_elements_state;
      cso->context = mgr->pipe;

      iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso);
      handle = cso->data;
   } else {
      handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   mgr->pipe->bind_vertex_elements_state(mgr->pipe, handle);
   return handle;
}

void u_vbuf_destroy(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   assert(mgr->pipe->draw);
   mgr->pipe->draw = NULL;

   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   cso_cache_delete(mgr->cso_cache);
   FREE(mgr);
}

static void
u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
                         unsigned vb_mask, unsigned out_vb,
                         int start_vertex, unsigned num_vertices,
                         int start_index, unsigned num_indices, int min_index,
                         bool unroll_indices)
{
   struct translate *tr;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   uint8_t *out_map;
   unsigned i, out_offset;

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_mask & (1 << i)) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];
         unsigned offset = vb->buffer_offset + vb->stride * start_vertex;
         uint8_t *map;

         if (u_vbuf_resource(vb->buffer)->user_ptr) {
            map = u_vbuf_resource(vb->buffer)->user_ptr + offset;
         } else {
            unsigned size = vb->stride ? num_vertices * vb->stride
                                       : sizeof(double)*4;

            if (offset+size > vb->buffer->width0) {
               size = vb->buffer->width0 - offset;
            }

            map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size,
                                        PIPE_TRANSFER_READ, &vb_transfer[i]);
         }

         /* Subtract min_index so that indexing with the index buffer works. */
         if (unroll_indices) {
            map -= vb->stride * min_index;
         }

         tr->set_buffer(tr, i, map, vb->stride, ~0);
      }
   }

   /* Translate. */
   if (unroll_indices) {
      struct pipe_index_buffer *ib = &mgr->index_buffer;
      struct pipe_transfer *transfer = NULL;
      unsigned offset = ib->offset + start_index * ib->index_size;
      uint8_t *map;

      assert(ib->buffer && ib->index_size);

      if (u_vbuf_resource(ib->buffer)->user_ptr) {
         map = u_vbuf_resource(ib->buffer)->user_ptr + offset;
      } else {
         map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset,
                                     num_indices * ib->index_size,
                                     PIPE_TRANSFER_READ, &transfer);
      }

      /* Create and map the output buffer. */
      u_upload_alloc(mgr->b.uploader, 0,
                     key->output_stride * num_indices,
                     &out_offset, &out_buffer,
                     (void**)&out_map);

      switch (ib->index_size) {
      case 4:
         tr->run_elts(tr, (unsigned*)map, num_indices, 0, out_map);
         break;
      case 2:
         tr->run_elts16(tr, (uint16_t*)map, num_indices, 0, out_map);
         break;
      case 1:
         tr->run_elts8(tr, map, num_indices, 0, out_map);
         break;
      }

      if (transfer) {
         pipe_buffer_unmap(mgr->pipe, transfer);
      }
   } else {
      /* Create and map the output buffer. */
      u_upload_alloc(mgr->b.uploader,
                     key->output_stride * start_vertex,
                     key->output_stride * num_vertices,
                     &out_offset, &out_buffer,
                     (void**)&out_map);

      out_offset -= key->output_stride * start_vertex;

      tr->run(tr, 0, num_vertices, 0, out_map);
   }

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_transfer[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer. */
   mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
   mgr->real_vertex_buffer[out_vb].stride = key->output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(
      &mgr->real_vertex_buffer[out_vb].buffer, NULL);
   mgr->real_vertex_buffer[out_vb].buffer = out_buffer;
}

static boolean
u_vbuf_translate_find_free_vb_slots(struct u_vbuf_priv *mgr,
                                    unsigned mask[VB_NUM])
{
   unsigned i, type;
   unsigned nr = mgr->ve->count;
   boolean used_vb[PIPE_MAX_ATTRIBS] = {0};
   unsigned fallback_vbs[VB_NUM];

   memset(fallback_vbs, ~0, sizeof(fallback_vbs));

   /* Mark used vertex buffers as... used. */
   for (i = 0; i < nr; i++) {
      if (!mgr->ve->incompatible_layout_elem[i]) {
         unsigned index = mgr->ve->ve[i].vertex_buffer_index;

         if (!mgr->incompatible_vb[index]) {
            used_vb[index] = TRUE;
         }
      }
   }

   /* Find free slots for each type if needed. */
   i = 0;
   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         for (; i < PIPE_MAX_ATTRIBS; i++) {
            if (!used_vb[i]) {
               /*printf("found slot=%i for type=%i\n", i, type);*/
               fallback_vbs[type] = i;
               i++;
               if (i > mgr->nr_real_vertex_buffers) {
                  mgr->nr_real_vertex_buffers = i;
               }
               break;
            }
         }
         if (i == PIPE_MAX_ATTRIBS) {
            /* fail, reset the number to its original value */
            mgr->nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
            return FALSE;
         }
      }
   }

   memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs));
   return TRUE;
}

static boolean
u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
                       int start_vertex, unsigned num_vertices,
                       int start_instance, unsigned num_instances,
                       int start_index, unsigned num_indices, int min_index,
                       bool unroll_indices)
{
   unsigned mask[VB_NUM] = {0};
   struct translate_key key[VB_NUM];
   unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... into key.elements */
   unsigned i, type;

   int start[VB_NUM] = {
      start_vertex,     /* VERTEX */
      start_instance,   /* INSTANCE */
      0                 /* CONST */
   };

   unsigned num[VB_NUM] = {
      num_vertices,     /* VERTEX */
      num_instances,    /* INSTANCE */
      1                 /* CONST */
   };

   memset(key, 0, sizeof(key));
   memset(elem_index, ~0, sizeof(elem_index));

   /* See if there are vertex attribs of each type to translate and
    * which ones. */
   for (i = 0; i < mgr->ve->count; i++) {
      unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;

      if (!mgr->b.vertex_buffer[vb_index].stride) {
         if (!mgr->ve->incompatible_layout_elem[i] &&
             !mgr->incompatible_vb[vb_index]) {
            continue;
         }
         mask[VB_CONST] |= 1 << vb_index;
      } else if (mgr->ve->ve[i].instance_divisor) {
         if (!mgr->ve->incompatible_layout_elem[i] &&
             !mgr->incompatible_vb[vb_index]) {
            continue;
         }
         mask[VB_INSTANCE] |= 1 << vb_index;
      } else {
         if (!unroll_indices &&
             !mgr->ve->incompatible_layout_elem[i] &&
             !mgr->incompatible_vb[vb_index]) {
            continue;
         }
         mask[VB_VERTEX] |= 1 << vb_index;
      }
   }

   assert(mask[VB_VERTEX] || mask[VB_INSTANCE] || mask[VB_CONST]);

   /* Find free vertex buffer slots. */
   if (!u_vbuf_translate_find_free_vb_slots(mgr, mask)) {
      return FALSE;
   }

   /* Initialize the translate keys. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct translate_key *k;
      struct translate_element *te;
      unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;
      bit = 1 << vb_index;

      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[vb_index] &&
          (!unroll_indices || !(mask[VB_VERTEX] & bit))) {
         continue;
      }

      /* Set type to what we will translate.
       * Whether vertex, instance, or constant attribs. */
      for (type = 0; type < VB_NUM; type++) {
         if (mask[type] & bit) {
            break;
         }
      }
      assert(type < VB_NUM);
      assert(translate_is_output_format_supported(mgr->ve->native_format[i]));
      /*printf("velem=%i type=%i\n", i, type);*/

      /* Add the vertex element. */
      k = &key[type];
      elem_index[type][i] = k->nr_elements;

      te = &k->element[k->nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = vb_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = mgr->ve->native_format[i];
      te->output_offset = k->output_stride;

      k->output_stride += mgr->ve->native_format_size[i];
      k->nr_elements++;
   }

   /* Translate buffers. */
   for (type = 0; type < VB_NUM; type++) {
      if (key[type].nr_elements) {
         u_vbuf_translate_buffers(mgr, &key[type], mask[type],
                                  mgr->fallback_vbs[type],
                                  start[type], num[type],
                                  start_index, num_indices, min_index,
                                  unroll_indices && type == VB_VERTEX);

         /* Fixup the stride for constant attribs. */
         if (type == VB_CONST) {
            mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
         }
      }
   }

   /* Setup new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      for (type = 0; type < VB_NUM; type++) {
         if (elem_index[type][i] < key[type].nr_elements) {
            struct translate_element *te = &key[type].element[elem_index[type][i]];
            mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            mgr->fallback_velems[i].src_format = te->output_format;
            mgr->fallback_velems[i].src_offset = te->output_offset;
            mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vbs[type];

            /* elem_index[type][i] can only be set for one type. */
            assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0);
            assert(type > VB_VERTEX || elem_index[type+2][i] == ~0);
            break;
         }
      }
      /* No translating, just copy the original vertex element over. */
      if (type == VB_NUM) {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   /* Preserve saved_ve. */
   mgr->ve_binding_lock = TRUE;
   mgr->fallback_ve = u_vbuf_pipe_set_vertex_elements(mgr, mgr->ve->count,
                                                      mgr->fallback_velems);
   mgr->ve_binding_lock = FALSE;
   return TRUE;
}

static void u_vbuf_translate_end(struct u_vbuf_priv *mgr)
{
   unsigned i;

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->fallback_ve = NULL;

   /* Unreference the now-unused VBOs. */
   for (i = 0; i < VB_NUM; i++) {
      unsigned vb = mgr->fallback_vbs[i];
      if (vb != ~0) {
         pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer, NULL);
         mgr->fallback_vbs[i] = ~0;
      }
   }
   mgr->nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}

#define FORMAT_REPLACE(what, withwhat) \
   case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break

struct u_vbuf_elements *
u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
                              unsigned count,
                              const struct pipe_vertex_element *attribs,
                              struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);

   ve->count = count;

   if (!count) {
      return ve;
   }

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->b.caps.format_fixed32) {
         switch (format) {
            FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float16) {
         switch (format) {
            FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float64) {
         switch (format) {
            FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_norm32) {
         switch (format) {
            FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_scaled32) {
         switch (format) {
            FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
            default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout_elem[i] =
         ve->ve[i].src_format != ve->native_format[i] ||
         (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
      ve->incompatible_layout =
         ve->incompatible_layout ||
         ve->incompatible_layout_elem[i];
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->b.caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}

void u_vbuf_bind_vertex_elements(struct u_vbuf *mgrb,
                                 void *cso,
                                 struct u_vbuf_elements *ve)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (!cso) {
      return;
   }

   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}

void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr,
                                    struct u_vbuf_elements *ve)
{
   FREE(ve);
}

static void u_vbuf_set_vertex_buffers(struct pipe_context *pipe,
                                      unsigned count,
                                      const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)pipe->draw;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;
   memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));

   if (!mgr->b.caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               mgr->incompatible_vb[i] = TRUE;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);

      mgr->real_vertex_buffer[i].buffer_offset =
         mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;

      mgr->real_vertex_buffer[i].stride =
         mgr->b.vertex_buffer[i].stride = vb->stride;

      if (!vb->buffer ||
          mgr->incompatible_vb[i]) {
         pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, vb->buffer);
   }

   for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = count; i < mgr->nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
   }

   mgr->b.nr_vertex_buffers = count;
   mgr->nr_real_vertex_buffers = count;

   if (!mgr->any_user_vbs && !mgr->incompatible_vb_layout) {
      mgr->driver_set_vertex_buffers(pipe, mgr->nr_real_vertex_buffers,
                                     mgr->real_vertex_buffer);
   }
}

static void u_vbuf_set_index_buffer(struct pipe_context *pipe,
                                    const struct pipe_index_buffer *ib)
{
   struct u_vbuf_priv *mgr = pipe->draw;

   if (ib && ib->buffer) {
      assert(ib->offset % ib->index_size == 0);
      pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
      mgr->index_buffer.offset = ib->offset;
      mgr->index_buffer.index_size = ib->index_size;
   } else {
      pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
   }

   mgr->driver_set_index_buffer(pipe, ib);
}

static void
u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
                      int start_vertex, unsigned num_vertices,
                      int start_instance, unsigned num_instances)
{
   unsigned i;
   unsigned nr_velems = mgr->ve->count;
   unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
      unsigned instance_div, first, size;

      /* Skip the buffers generated by translate. */
      if (index == mgr->fallback_vbs[VB_VERTEX] ||
          index == mgr->fallback_vbs[VB_INSTANCE] ||
          index == mgr->fallback_vbs[VB_CONST]) {
         continue;
      }

      assert(vb->buffer);

      if (!u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned count = (num_instances + instance_div - 1) / instance_div;
         first += vb->stride * start_instance;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * start_vertex;
         size = vb->stride * (num_vertices - 1) + mgr->ve->src_format_size[i];
      }

      /* Update offsets. */
      if (!end_offset[index]) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }
   }

   /* Upload buffers. */
   for (i = 0; i < nr_vbufs; i++) {
      unsigned start, end = end_offset[i];
      struct pipe_vertex_buffer *real_vb;
      uint8_t *ptr;

      if (!end) {
         continue;
      }

      start = start_offset[i];
      assert(start < end);

      real_vb = &mgr->real_vertex_buffer[i];
      ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;

      u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer);

      real_vb->buffer_offset -= start;
   }
}

unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i, nr = mgr->ve->count;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned result = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
         &mgr->real_vertex_buffer[velems[i].vertex_buffer_index];
      unsigned size, max_count, value;

      /* We're not interested in constant and per-instance attribs. */
      if (!vb->buffer ||
          !vb->stride ||
          velems[i].instance_divisor) {
         continue;
      }

      size = vb->buffer->width0;

      /* Subtract buffer_offset. */
      value = vb->buffer_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract src_offset. */
      value = velems[i].src_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract format_size. */
      value = mgr->ve->native_format_size[i];
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Compute the max count. */
      max_count = 1 + size / vb->stride;
      result = MIN2(result, max_count);
   }
   return result;
}

static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs don't need min/max_index. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->b.vertex_buffer[index];

      /* Constant attribs don't need min/max_index. */
      if (!vb->stride) {
         continue;
      }

      /* Per-vertex attribs need min/max_index. */
      if (u_vbuf_resource(vb->buffer)->user_ptr ||
          mgr->ve->incompatible_layout_elem[i] ||
          mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}

static boolean u_vbuf_mapping_vertex_buffer_blocks(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs are not per-vertex data. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->b.vertex_buffer[index];

      /* Constant attribs are not per-vertex data. */
      if (!vb->stride) {
         continue;
      }

      /* Return true for the hw buffers which don't need to be translated. */
      /* XXX we could use some kind of a is-busy query. */
      if (!u_vbuf_resource(vb->buffer)->user_ptr &&
          !mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}

static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                                    struct pipe_index_buffer *ib,
                                    const struct pipe_draw_info *info,
                                    int *out_min_index,
                                    int *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;
   unsigned i;
   unsigned restart_index = info->restart_index;

   if (u_vbuf_resource(ib->buffer)->user_ptr) {
      indices = u_vbuf_resource(ib->buffer)->user_ptr +
                ib->offset + info->start * ib->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, ib->buffer,
                                      ib->offset + info->start * ib->index_size,
                                      info->count * ib->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   switch (ib->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max_ui = 0;
      unsigned min_ui = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] != restart_index) {
               if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
            if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
         }
      }
      *out_min_index = min_ui;
      *out_max_index = max_ui;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned max_us = 0;
      unsigned min_us = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] != restart_index) {
               if (us_indices[i] > max_us) max_us = us_indices[i];
               if (us_indices[i] < min_us) min_us = us_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] > max_us) max_us = us_indices[i];
            if (us_indices[i] < min_us) min_us = us_indices[i];
         }
      }
      *out_min_index = min_us;
      *out_max_index = max_us;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned max_ub = 0;
      unsigned min_ub = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] != restart_index) {
               if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
         }
      }
      *out_min_index = min_ub;
      *out_max_index = max_ub;
      break;
   }
   default:
      assert(0);
      *out_min_index = 0;
      *out_max_index = 0;
   }

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}

void u_vbuf_draw_begin(struct u_vbuf *mgrb,
                       struct pipe_draw_info *info)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   int start_vertex, min_index;
   unsigned num_vertices;
   bool unroll_indices = false;

   if (!mgr->incompatible_vb_layout &&
       !mgr->ve->incompatible_layout &&
       !mgr->any_user_vbs) {
      return;
   }

   if (info->indexed) {
      int max_index;
      bool index_bounds_valid = false;

      if (info->max_index != ~0) {
         min_index = info->min_index;
         max_index = info->max_index;
         index_bounds_valid = true;
      } else if (u_vbuf_need_minmax_index(mgr)) {
         u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer, info,
                                 &min_index, &max_index);
         index_bounds_valid = true;
      }

      /* If the index bounds are valid, it means some upload or translation
       * of per-vertex attribs will be performed. */
      if (index_bounds_valid) {
         assert(min_index <= max_index);

         start_vertex = min_index + info->index_bias;
         num_vertices = max_index + 1 - min_index;

         /* Primitive restart doesn't work when unrolling indices.
          * We would have to break this drawing operation into several ones. */
         /* Use some heuristic to see if unrolling indices improves
          * performance. */
         if (!info->primitive_restart &&
             num_vertices > info->count*2 &&
             num_vertices-info->count > 32 &&
             !u_vbuf_mapping_vertex_buffer_blocks(mgr)) {
            /*printf("num_vertices=%i count=%i\n", num_vertices, info->count);*/
            unroll_indices = true;
         }
      } else {
         /* Nothing to do for per-vertex attribs. */
         start_vertex = 0;
         num_vertices = 0;
         min_index = 0;
      }
   } else {
      start_vertex = info->start;
      num_vertices = info->count;
      min_index = 0;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (unroll_indices ||
       mgr->incompatible_vb_layout ||
       mgr->ve->incompatible_layout) {
      /* XXX check the return value */
      u_vbuf_translate_begin(mgr, start_vertex, num_vertices,
                             info->start_instance, info->instance_count,
                             info->start, info->count, min_index,
                             unroll_indices);
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
                            info->start_instance, info->instance_count);
   }

   /*
   if (unroll_indices) {
      printf("unrolling indices: start_vertex = %i, num_vertices = %i\n",
             start_vertex, num_vertices);
      util_dump_draw_info(stdout, info);
      printf("\n");
   }

   unsigned i;
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      printf("input %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->b.vertex_buffer+i);
      printf("\n");
   }
   for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
      printf("real %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i);
      printf("\n");
   }
   */

   if (unroll_indices) {
      info->indexed = FALSE;
      info->index_bias = 0;
      info->min_index = 0;
      info->max_index = info->count - 1;
      info->start = 0;
   }

   mgr->driver_set_vertex_buffers(mgr->pipe, mgr->nr_real_vertex_buffers,
                                  mgr->real_vertex_buffer);
}

void u_vbuf_draw_end(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}

static void u_vbuf_install(struct u_vbuf_priv *mgr)
{
   struct pipe_context *pipe = mgr->pipe;
   assert(!pipe->draw);

   pipe->draw = mgr;
   mgr->driver_set_index_buffer = pipe->set_index_buffer;
   mgr->driver_set_vertex_buffers = pipe->set_vertex_buffers;
   pipe->set_index_buffer = u_vbuf_set_index_buffer;
   pipe->set_vertex_buffers = u_vbuf_set_vertex_buffers;
}
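
/*
 * Usage sketch (illustrative only, not part of the original file): a gallium
 * driver would typically create the manager once at context-creation time
 * (u_vbuf_create() installs itself via pipe->draw and wraps the
 * set_vertex_buffers/set_index_buffer hooks) and then bracket each draw call
 * with u_vbuf_draw_begin()/u_vbuf_draw_end(), so user buffers are uploaded
 * and non-native vertex formats are translated before the hardware draw.
 * The upload size, bind flag and the hypothetical foo_* names below are
 * assumptions made for this sketch, not taken from this file.
 *
 *    // at context creation
 *    struct u_vbuf *vbuf =
 *       u_vbuf_create(pipe, 1024 * 1024, 4, PIPE_BIND_VERTEX_BUFFER,
 *                     U_VERTEX_FETCH_BYTE_ALIGNED);
 *
 *    // in the driver's draw callback
 *    static void foo_draw_vbo(struct pipe_context *pipe,
 *                             const struct pipe_draw_info *info)
 *    {
 *       struct foo_context *foo = foo_context(pipe);
 *       struct pipe_draw_info new_info = *info;
 *
 *       u_vbuf_draw_begin(foo->vbuf, &new_info);  // may rewrite new_info
 *       foo_emit_draw(foo, &new_info);            // the real hardware draw
 *       u_vbuf_draw_end(foo->vbuf);               // restore vertex elements
 *    }
 */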