u_vbuf.c revision e0773da1e897164ed7597437070e32b867734ee5
/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_vbuf.h"

#include "util/u_dump.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"
#include "cso_cache/cso_cache.h"
#include "cso_cache/cso_hash.h"

struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
   /* Per-element flags. */
   boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];

   void *driver_cso;
};

enum {
   VB_VERTEX = 0,
   VB_INSTANCE = 1,
   VB_CONST = 2,
   VB_NUM = 3
};
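
/* Categories of vertex data handled separately by the translate fallback.
 * u_vbuf_translate_begin() classifies each vertex element by how its data
 * is fetched: stride == 0 means a constant attrib (VB_CONST), a non-zero
 * instance divisor means a per-instance attrib (VB_INSTANCE), everything
 * else is per-vertex data (VB_VERTEX). Each category that needs translation
 * gets its own output vertex buffer slot. */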

struct u_vbuf {
   struct u_vbuf_caps caps;

   struct pipe_context *pipe;
   struct translate_cache *translate_cache;
   struct cso_cache *cso_cache;
   struct u_upload_mgr *uploader;

   /* This is what was set in set_vertex_buffers.
    * May contain user buffers. */
   struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
   unsigned nr_vertex_buffers;

   /* Saved vertex buffers. */
   struct pipe_vertex_buffer vertex_buffer_saved[PIPE_MAX_ATTRIBS];
   unsigned nr_vertex_buffers_saved;

   /* Vertex buffers for the driver.
    * There are no user buffers. */
   struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
   int nr_real_vertex_buffers;
   boolean vertex_buffers_dirty;

   /* The index buffer. */
   struct pipe_index_buffer index_buffer;

   /* Vertex elements. */
   struct u_vbuf_elements *ve, *ve_saved;

   /* Vertex elements used for the translate fallback. */
   struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
   /* TRUE if the fallback vertex elements above are currently bound and
    * therefore used for rendering too. */
   boolean using_translate;
   /* The vertex buffer slot indices where translated vertices are stored. */
   unsigned fallback_vbs[VB_NUM];

   /* Whether there is any user buffer. */
   boolean any_user_vbs;
   /* Whether there is a buffer with a non-native layout. */
   boolean incompatible_vb_layout;
   /* Per-buffer flags. */
   boolean incompatible_vb[PIPE_MAX_ATTRIBS];
};

static void *
u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
                              const struct pipe_vertex_element *attribs);
static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso);


void u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps)
{
   caps->format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   caps->format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   caps->format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   caps->format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   caps->format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   caps->fetch_dword_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY) &&
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY) &&
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY);

   caps->user_vertex_buffers =
      screen->get_param(screen, PIPE_CAP_USER_VERTEX_BUFFERS);
}

struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
              struct u_vbuf_caps *caps)
{
   struct u_vbuf *mgr = CALLOC_STRUCT(u_vbuf);

   mgr->caps = *caps;
   mgr->pipe = pipe;
   mgr->cso_cache = cso_cache_create();
   mgr->translate_cache = translate_cache_create();
   memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));

   mgr->uploader = u_upload_create(pipe, 1024 * 1024, 4,
                                   PIPE_BIND_VERTEX_BUFFER);

   return mgr;
}
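
/* A minimal usage sketch (illustrative, not from this file): a driver
 * queries the caps once per screen and creates one manager per context.
 * The names my_screen and my_context are hypothetical.
 *
 *    struct u_vbuf_caps caps;
 *    u_vbuf_get_caps(my_screen, &caps);
 *    my_context->vbuf = u_vbuf_create(my_context->pipe, &caps);
 *
 * After that, the driver routes set_vertex_buffers, set_index_buffer,
 * vertex element state changes and draw_vbo through the u_vbuf_* entry
 * points declared in u_vbuf.h. */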

/* u_vbuf uses its own caching for vertex elements, because it needs to keep
 * its own preprocessed state per vertex element CSO. */
static struct u_vbuf_elements *
u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr, unsigned count,
                                    const struct pipe_vertex_element *states)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   struct u_vbuf_elements *ve;
   struct cso_velems_state velems_state;

   /* need to include the count into the stored state data too. */
   key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
   velems_state.count = count;
   memcpy(velems_state.velems, states,
          sizeof(struct pipe_vertex_element) * count);
   hash_key = cso_construct_key((void*)&velems_state, key_size);
   iter = cso_find_state_template(mgr->cso_cache, hash_key, CSO_VELEMENTS,
                                  (void*)&velems_state, key_size);

   if (cso_hash_iter_is_null(iter)) {
      struct cso_velements *cso = MALLOC_STRUCT(cso_velements);
      memcpy(&cso->state, &velems_state, key_size);
      cso->data = u_vbuf_create_vertex_elements(mgr, count, states);
      cso->delete_state = (cso_state_callback)u_vbuf_delete_vertex_elements;
      cso->context = (void*)mgr;

      iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso);
      ve = cso->data;
   } else {
      ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   assert(ve);
   pipe->bind_vertex_elements_state(pipe, ve->driver_cso);
   return ve;
}

void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count,
                                const struct pipe_vertex_element *states)
{
   mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, count, states);
}

void u_vbuf_destroy(struct u_vbuf *mgr)
{
   unsigned i;

   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL);
   }
   for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->uploader);
   cso_cache_delete(mgr->cso_cache);
   FREE(mgr);
}
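
/* Translate the vertex data of the buffers selected by vb_mask into the
 * "native" layout described by key, writing the result into a freshly
 * uploaded buffer that is then installed in real_vertex_buffer[out_vb].
 * With unroll_indices, the index buffer is read and vertices are emitted
 * in index order, turning an indexed draw into a non-indexed one. */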

static void
u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
                         unsigned vb_mask, unsigned out_vb,
                         int start_vertex, unsigned num_vertices,
                         int start_index, unsigned num_indices, int min_index,
                         boolean unroll_indices)
{
   struct translate *tr;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   uint8_t *out_map;
   unsigned i, out_offset;

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
      if (vb_mask & (1 << i)) {
         struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[i];
         unsigned offset = vb->buffer_offset + vb->stride * start_vertex;
         uint8_t *map;

         if (vb->buffer->user_ptr) {
            map = vb->buffer->user_ptr + offset;
         } else {
            unsigned size = vb->stride ? num_vertices * vb->stride
                                       : sizeof(double)*4;

            if (offset+size > vb->buffer->width0) {
               size = vb->buffer->width0 - offset;
            }

            map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size,
                                        PIPE_TRANSFER_READ, &vb_transfer[i]);
         }

         /* Subtract min_index so that indexing with the index buffer works. */
         if (unroll_indices) {
            map -= vb->stride * min_index;
         }

         tr->set_buffer(tr, i, map, vb->stride, ~0);
      }
   }

   /* Translate. */
   if (unroll_indices) {
      struct pipe_index_buffer *ib = &mgr->index_buffer;
      struct pipe_transfer *transfer = NULL;
      unsigned offset = ib->offset + start_index * ib->index_size;
      uint8_t *map;

      assert(ib->buffer && ib->index_size);

      if (ib->buffer->user_ptr) {
         map = ib->buffer->user_ptr + offset;
      } else {
         map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset,
                                     num_indices * ib->index_size,
                                     PIPE_TRANSFER_READ, &transfer);
      }

      /* Create and map the output buffer. */
      u_upload_alloc(mgr->uploader, 0,
                     key->output_stride * num_indices,
                     &out_offset, &out_buffer,
                     (void**)&out_map);

      switch (ib->index_size) {
      case 4:
         tr->run_elts(tr, (unsigned*)map, num_indices, 0, out_map);
         break;
      case 2:
         tr->run_elts16(tr, (uint16_t*)map, num_indices, 0, out_map);
         break;
      case 1:
         tr->run_elts8(tr, map, num_indices, 0, out_map);
         break;
      }

      if (transfer) {
         pipe_buffer_unmap(mgr->pipe, transfer);
      }
   } else {
      /* Create and map the output buffer. */
      u_upload_alloc(mgr->uploader,
                     key->output_stride * start_vertex,
                     key->output_stride * num_vertices,
                     &out_offset, &out_buffer,
                     (void**)&out_map);

      out_offset -= key->output_stride * start_vertex;

      tr->run(tr, 0, num_vertices, 0, out_map);
   }

   /* Unmap all buffers. */
   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
      if (vb_transfer[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer. */
   mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
   mgr->real_vertex_buffer[out_vb].stride = key->output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(
      &mgr->real_vertex_buffer[out_vb].buffer, NULL);
   mgr->real_vertex_buffer[out_vb].buffer = out_buffer;
}

static boolean
u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
                                    unsigned mask[VB_NUM])
{
   unsigned i, type;
   unsigned nr = mgr->ve->count;
   boolean used_vb[PIPE_MAX_ATTRIBS] = {0};
   unsigned fallback_vbs[VB_NUM];

   memset(fallback_vbs, ~0, sizeof(fallback_vbs));

   /* Mark used vertex buffers as... used. */
   for (i = 0; i < nr; i++) {
      if (!mgr->ve->incompatible_layout_elem[i]) {
         unsigned index = mgr->ve->ve[i].vertex_buffer_index;

         if (!mgr->incompatible_vb[index]) {
            used_vb[index] = TRUE;
         }
      }
   }

   /* Find free slots for each type if needed. */
   i = 0;
   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         for (; i < PIPE_MAX_ATTRIBS; i++) {
            if (!used_vb[i]) {
               /*printf("found slot=%i for type=%i\n", i, type);*/
               fallback_vbs[type] = i;
               i++;
               if (i > mgr->nr_real_vertex_buffers) {
                  mgr->nr_real_vertex_buffers = i;
               }
               break;
            }
         }
         if (i == PIPE_MAX_ATTRIBS) {
            /* fail, reset the number to its original value */
            mgr->nr_real_vertex_buffers = mgr->nr_vertex_buffers;
            return FALSE;
         }
      }
   }

   memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs));
   return TRUE;
}
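
/* Decide which vertex elements need the translate fallback, build one
 * translate_key per category and run the translation. For example, an
 * R64G64B64A64_FLOAT attrib on hardware without format_float64 support
 * that has a non-zero stride and no instance divisor is per-vertex data,
 * so its buffer bit goes into mask[VB_VERTEX] and its data is converted
 * into the VB_VERTEX fallback buffer. Returns FALSE if no free vertex
 * buffer slots could be found. */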

static boolean
u_vbuf_translate_begin(struct u_vbuf *mgr,
                       int start_vertex, unsigned num_vertices,
                       int start_instance, unsigned num_instances,
                       int start_index, unsigned num_indices, int min_index,
                       boolean unroll_indices)
{
   unsigned mask[VB_NUM] = {0};
   struct translate_key key[VB_NUM];
   unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... into key.element[] */
   unsigned i, type;

   int start[VB_NUM] = {
      start_vertex,     /* VERTEX */
      start_instance,   /* INSTANCE */
      0                 /* CONST */
   };

   unsigned num[VB_NUM] = {
      num_vertices,     /* VERTEX */
      num_instances,    /* INSTANCE */
      1                 /* CONST */
   };

   memset(key, 0, sizeof(key));
   memset(elem_index, ~0, sizeof(elem_index));

   /* See if there are vertex attribs of each type to translate and
    * which ones. */
   for (i = 0; i < mgr->ve->count; i++) {
      unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;

      if (!mgr->vertex_buffer[vb_index].stride) {
         if (!mgr->ve->incompatible_layout_elem[i] &&
             !mgr->incompatible_vb[vb_index]) {
            continue;
         }
         mask[VB_CONST] |= 1 << vb_index;
      } else if (mgr->ve->ve[i].instance_divisor) {
         if (!mgr->ve->incompatible_layout_elem[i] &&
             !mgr->incompatible_vb[vb_index]) {
            continue;
         }
         mask[VB_INSTANCE] |= 1 << vb_index;
      } else {
         if (!unroll_indices &&
             !mgr->ve->incompatible_layout_elem[i] &&
             !mgr->incompatible_vb[vb_index]) {
            continue;
         }
         mask[VB_VERTEX] |= 1 << vb_index;
      }
   }

   assert(mask[VB_VERTEX] || mask[VB_INSTANCE] || mask[VB_CONST]);

   /* Find free vertex buffer slots. */
   if (!u_vbuf_translate_find_free_vb_slots(mgr, mask)) {
      return FALSE;
   }

   /* Initialize the translate keys. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct translate_key *k;
      struct translate_element *te;
      unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;
      bit = 1 << vb_index;

      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[vb_index] &&
          (!unroll_indices || !(mask[VB_VERTEX] & bit))) {
         continue;
      }

      /* Set type to what we will translate.
       * Whether vertex, instance, or constant attribs. */
      for (type = 0; type < VB_NUM; type++) {
         if (mask[type] & bit) {
            break;
         }
      }
      assert(type < VB_NUM);
      assert(translate_is_output_format_supported(mgr->ve->native_format[i]));
      /*printf("velem=%i type=%i\n", i, type);*/

      /* Add the vertex element. */
      k = &key[type];
      elem_index[type][i] = k->nr_elements;

      te = &k->element[k->nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = vb_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = mgr->ve->native_format[i];
      te->output_offset = k->output_stride;

      k->output_stride += mgr->ve->native_format_size[i];
      k->nr_elements++;
   }

   /* Translate buffers. */
   for (type = 0; type < VB_NUM; type++) {
      if (key[type].nr_elements) {
         u_vbuf_translate_buffers(mgr, &key[type], mask[type],
                                  mgr->fallback_vbs[type],
                                  start[type], num[type],
                                  start_index, num_indices, min_index,
                                  unroll_indices && type == VB_VERTEX);

         /* Fixup the stride for constant attribs. */
         if (type == VB_CONST) {
            mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
         }
      }
   }

   /* Setup new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      for (type = 0; type < VB_NUM; type++) {
         if (elem_index[type][i] < key[type].nr_elements) {
            struct translate_element *te = &key[type].element[elem_index[type][i]];
            mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            mgr->fallback_velems[i].src_format = te->output_format;
            mgr->fallback_velems[i].src_offset = te->output_offset;
            mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vbs[type];

            /* elem_index[type][i] can only be set for one type. */
            assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0);
            assert(type > VB_VERTEX || elem_index[type+2][i] == ~0);
            break;
         }
      }
      /* No translating, just copy the original vertex element over. */
      if (type == VB_NUM) {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   u_vbuf_set_vertex_elements_internal(mgr, mgr->ve->count,
                                       mgr->fallback_velems);
   mgr->using_translate = TRUE;
   return TRUE;
}

static void u_vbuf_translate_end(struct u_vbuf *mgr)
{
   unsigned i;

   /* Restore vertex elements. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso);
   mgr->using_translate = FALSE;

   /* Unreference the now-unused VBOs. */
   for (i = 0; i < VB_NUM; i++) {
      unsigned vb = mgr->fallback_vbs[i];
      if (vb != ~0) {
         pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer, NULL);
         mgr->fallback_vbs[i] = ~0;
      }
   }
   mgr->nr_real_vertex_buffers = mgr->nr_vertex_buffers;
}
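
/* Helper for the format fallbacks below: a case label that substitutes one
 * vertex format for another. For instance,
 *
 *    FORMAT_REPLACE(R32_FIXED, R32_FLOAT);
 *
 * expands to
 *
 *    case PIPE_FORMAT_R32_FIXED: format = PIPE_FORMAT_R32_FLOAT; break;
 */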
#define FORMAT_REPLACE(what, withwhat) \
    case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break

static void *
u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
                              const struct pipe_vertex_element *attribs)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned i;
   struct pipe_vertex_element native_attribs[PIPE_MAX_ATTRIBS];
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);

   ve->count = count;

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->caps.format_fixed32) {
         switch (format) {
         FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->caps.format_float16) {
         switch (format) {
         FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
         FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
         FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->caps.format_float64) {
         switch (format) {
         FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
         FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
         FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->caps.format_norm32) {
         switch (format) {
         FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
         FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->caps.format_scaled32) {
         switch (format) {
         FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
         FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);
         FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
         FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
         default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout_elem[i] =
         ve->ve[i].src_format != ve->native_format[i] ||
         (!mgr->caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
      ve->incompatible_layout =
         ve->incompatible_layout ||
         ve->incompatible_layout_elem[i];
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   ve->driver_cso =
      pipe->create_vertex_elements_state(pipe, count, native_attribs);
   return ve;
}

static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso)
{
   struct pipe_context *pipe = mgr->pipe;
   struct u_vbuf_elements *ve = cso;

   pipe->delete_vertex_elements_state(pipe, ve->driver_cso);
   FREE(ve);
}
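
/* Record the vertex buffers set by the state tracker and build the "real"
 * buffer list for the driver. User buffers and buffers with an incompatible
 * layout get a NULL resource in the real list here; they are uploaded or
 * translated later, at draw time. */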
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;
   memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));

   if (!mgr->caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               mgr->incompatible_vb[i] = TRUE;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->vertex_buffer[i].buffer, vb->buffer);

      mgr->real_vertex_buffer[i].buffer_offset =
         mgr->vertex_buffer[i].buffer_offset = vb->buffer_offset;

      mgr->real_vertex_buffer[i].stride =
         mgr->vertex_buffer[i].stride = vb->stride;

      if (!vb->buffer ||
          mgr->incompatible_vb[i]) {
         pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
         continue;
      }

      if (vb->buffer->user_ptr) {
         pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, vb->buffer);
   }

   for (i = count; i < mgr->nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL);
   }
   for (i = count; i < mgr->nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
   }

   mgr->nr_vertex_buffers = count;
   mgr->nr_real_vertex_buffers = count;
   mgr->vertex_buffers_dirty = TRUE;
}

void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
                             const struct pipe_index_buffer *ib)
{
   struct pipe_context *pipe = mgr->pipe;

   if (ib && ib->buffer) {
      assert(ib->offset % ib->index_size == 0);
      pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
      mgr->index_buffer.offset = ib->offset;
      mgr->index_buffer.index_size = ib->index_size;
   } else {
      pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
   }

   pipe->set_index_buffer(pipe, ib);
}
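
/* Upload the used ranges of all user buffers with u_upload_mgr. The ranges
 * of all vertex elements sourcing the same buffer are merged first, so two
 * attribs interleaved in one user buffer (say, position at src_offset 0 and
 * color at src_offset 12 with stride 16) result in a single upload covering
 * both rather than two overlapping ones. */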
static void
u_vbuf_upload_buffers(struct u_vbuf *mgr,
                      int start_vertex, unsigned num_vertices,
                      int start_instance, unsigned num_instances)
{
   unsigned i;
   unsigned nr_velems = mgr->ve->count;
   unsigned nr_vbufs = mgr->nr_vertex_buffers;
   struct pipe_vertex_element *velems =
      mgr->using_translate ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index];
      unsigned instance_div, first, size;

      /* Skip the buffers generated by translate. */
      if (index == mgr->fallback_vbs[VB_VERTEX] ||
          index == mgr->fallback_vbs[VB_INSTANCE] ||
          index == mgr->fallback_vbs[VB_CONST]) {
         continue;
      }

      assert(vb->buffer);

      if (!vb->buffer->user_ptr) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned count = (num_instances + instance_div - 1) / instance_div;
         first += vb->stride * start_instance;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * start_vertex;
         size = vb->stride * (num_vertices - 1) + mgr->ve->src_format_size[i];
      }

      /* Update offsets. */
      if (!end_offset[index]) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }
   }

   /* Upload buffers. */
   for (i = 0; i < nr_vbufs; i++) {
      unsigned start, end = end_offset[i];
      struct pipe_vertex_buffer *real_vb;
      uint8_t *ptr;

      if (!end) {
         continue;
      }

      start = start_offset[i];
      assert(start < end);

      real_vb = &mgr->real_vertex_buffer[i];
      ptr = mgr->vertex_buffer[i].buffer->user_ptr;

      u_upload_data(mgr->uploader, start, end - start, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer);

      real_vb->buffer_offset -= start;
   }
}

static boolean u_vbuf_need_minmax_index(struct u_vbuf *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs don't need min/max_index. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->vertex_buffer[index];

      /* Constant attribs don't need min/max_index. */
      if (!vb->stride) {
         continue;
      }

      /* Per-vertex attribs need min/max_index. */
      if (vb->buffer->user_ptr ||
          mgr->ve->incompatible_layout_elem[i] ||
          mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}
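
/* Check whether the index-unrolling fallback would have to map a hardware
 * buffer for reading. Mapping a buffer the GPU may still be using can
 * block (hence the is-busy-query XXX below), so the unroll heuristic in
 * u_vbuf_draw_vbo avoids unrolling in that case. */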
static boolean u_vbuf_mapping_vertex_buffer_blocks(struct u_vbuf *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs are not per-vertex data. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->vertex_buffer[index];

      /* Constant attribs are not per-vertex data. */
      if (!vb->stride) {
         continue;
      }

      /* Return true for the hw buffers which don't need to be translated. */
      /* XXX we could use some kind of a is-busy query. */
      if (!vb->buffer->user_ptr &&
          !mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}

static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                                    struct pipe_index_buffer *ib,
                                    const struct pipe_draw_info *info,
                                    int *out_min_index,
                                    int *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;
   unsigned i;
   unsigned restart_index = info->restart_index;

   if (ib->buffer->user_ptr) {
      indices = ib->buffer->user_ptr +
                ib->offset + info->start * ib->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, ib->buffer,
                                      ib->offset + info->start * ib->index_size,
                                      info->count * ib->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   switch (ib->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max_ui = 0;
      unsigned min_ui = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] != restart_index) {
               if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
            if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
         }
      }
      *out_min_index = min_ui;
      *out_max_index = max_ui;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned max_us = 0;
      unsigned min_us = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] != restart_index) {
               if (us_indices[i] > max_us) max_us = us_indices[i];
               if (us_indices[i] < min_us) min_us = us_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] > max_us) max_us = us_indices[i];
            if (us_indices[i] < min_us) min_us = us_indices[i];
         }
      }
      *out_min_index = min_us;
      *out_max_index = max_us;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned max_ub = 0;
      unsigned min_ub = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] != restart_index) {
               if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
         }
      }
      *out_min_index = min_ub;
      *out_max_index = max_ub;
      break;
   }
   default:
      assert(0);
      *out_min_index = 0;
      *out_max_index = 0;
   }

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}
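
/* The draw entry point. The fast path passes the draw straight through to
 * the driver. Otherwise, depending on what is bound, this translates vertex
 * data into native formats, uploads user buffers, and/or unrolls the index
 * buffer into a non-indexed draw before calling the driver. */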
void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
{
   struct pipe_context *pipe = mgr->pipe;
   int start_vertex, min_index;
   unsigned num_vertices;
   boolean unroll_indices = FALSE;

   /* Normal draw. No fallback and no user buffers. */
   if (!mgr->incompatible_vb_layout &&
       !mgr->ve->incompatible_layout &&
       !mgr->any_user_vbs) {
      /* Set vertex buffers if needed. */
      if (mgr->vertex_buffers_dirty) {
         pipe->set_vertex_buffers(pipe, mgr->nr_real_vertex_buffers,
                                  mgr->real_vertex_buffer);
         mgr->vertex_buffers_dirty = FALSE;
      }

      pipe->draw_vbo(pipe, info);
      return;
   }

   if (info->indexed) {
      int max_index;
      boolean index_bounds_valid = FALSE;

      if (info->max_index != ~0) {
         min_index = info->min_index;
         max_index = info->max_index;
         index_bounds_valid = TRUE;
      } else if (u_vbuf_need_minmax_index(mgr)) {
         u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer, info,
                                 &min_index, &max_index);
         index_bounds_valid = TRUE;
      }

      /* If the index bounds are valid, it means some upload or translation
       * of per-vertex attribs will be performed. */
      if (index_bounds_valid) {
         assert(min_index <= max_index);

         start_vertex = min_index + info->index_bias;
         num_vertices = max_index + 1 - min_index;

         /* Primitive restart doesn't work when unrolling indices.
          * We would have to break this drawing operation into several ones. */
         /* Use some heuristic to see if unrolling indices improves
          * performance. */
         if (!info->primitive_restart &&
             num_vertices > info->count*2 &&
             num_vertices-info->count > 32 &&
             !u_vbuf_mapping_vertex_buffer_blocks(mgr)) {
            /*printf("num_vertices=%i count=%i\n", num_vertices, info->count);*/
            unroll_indices = TRUE;
         }
      } else {
         /* Nothing to do for per-vertex attribs. */
         start_vertex = 0;
         num_vertices = 0;
         min_index = 0;
      }
   } else {
      start_vertex = info->start;
      num_vertices = info->count;
      min_index = 0;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (unroll_indices ||
       mgr->incompatible_vb_layout ||
       mgr->ve->incompatible_layout) {
      /* XXX check the return value */
      u_vbuf_translate_begin(mgr, start_vertex, num_vertices,
                             info->start_instance, info->instance_count,
                             info->start, info->count, min_index,
                             unroll_indices);
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
                            info->start_instance, info->instance_count);
   }

   /*
   if (unroll_indices) {
      printf("unrolling indices: start_vertex = %i, num_vertices = %i\n",
             start_vertex, num_vertices);
      util_dump_draw_info(stdout, info);
      printf("\n");
   }

   unsigned i;
   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
      printf("input %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->vertex_buffer+i);
      printf("\n");
   }
   for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
      printf("real %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i);
      printf("\n");
   }
   */

   u_upload_unmap(mgr->uploader);
   pipe->set_vertex_buffers(pipe, mgr->nr_real_vertex_buffers,
                            mgr->real_vertex_buffer);

   if (unlikely(unroll_indices)) {
      struct pipe_draw_info new_info = *info;
      new_info.indexed = FALSE;
      new_info.index_bias = 0;
      new_info.min_index = 0;
      new_info.max_index = info->count - 1;
      new_info.start = 0;

      pipe->draw_vbo(pipe, &new_info);
   } else {
      pipe->draw_vbo(pipe, info);
   }

   if (mgr->using_translate) {
      u_vbuf_translate_end(mgr);
   }
   mgr->vertex_buffers_dirty = TRUE;
}

void u_vbuf_save_vertex_elements(struct u_vbuf *mgr)
{
   assert(!mgr->ve_saved);
   mgr->ve_saved = mgr->ve;
}

void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr)
{
   if (mgr->ve != mgr->ve_saved) {
      struct pipe_context *pipe = mgr->pipe;

      mgr->ve = mgr->ve_saved;
      pipe->bind_vertex_elements_state(pipe,
                                       mgr->ve ? mgr->ve->driver_cso : NULL);
   }
   mgr->ve_saved = NULL;
}

void u_vbuf_save_vertex_buffers(struct u_vbuf *mgr)
{
   util_copy_vertex_buffers(mgr->vertex_buffer_saved,
                            &mgr->nr_vertex_buffers_saved,
                            mgr->vertex_buffer,
                            mgr->nr_vertex_buffers);
}

void u_vbuf_restore_vertex_buffers(struct u_vbuf *mgr)
{
   unsigned i;

   u_vbuf_set_vertex_buffers(mgr, mgr->nr_vertex_buffers_saved,
                             mgr->vertex_buffer_saved);
   for (i = 0; i < mgr->nr_vertex_buffers_saved; i++) {
      pipe_resource_reference(&mgr->vertex_buffer_saved[i].buffer, NULL);
   }
   mgr->nr_vertex_buffers_saved = 0;
}
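
/* A minimal usage sketch for the save/restore helpers above (illustrative;
 * my_meta_op is a hypothetical operation that clobbers vertex state, such
 * as a blit): state is saved before the operation and restored afterwards.
 *
 *    u_vbuf_save_vertex_elements(mgr);
 *    u_vbuf_save_vertex_buffers(mgr);
 *    my_meta_op(pipe);
 *    u_vbuf_restore_vertex_elements(mgr);
 *    u_vbuf_restore_vertex_buffers(mgr);
 */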