u_vbuf.c revision 64242b23c1893dd6e1c048beee0e1573aeaf1abc
/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_vbuf.h"

#include "util/u_dump.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"

struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
   /* Per-element flags. */
   boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];
};

enum {
   VB_VERTEX = 0,
   VB_INSTANCE = 1,
   VB_CONST = 2,
   VB_NUM = 3
};

struct u_vbuf_priv {
   struct u_vbuf b;
   struct pipe_context *pipe;
   struct translate_cache *translate_cache;

   /* Vertex element state bound by the state tracker. */
   void *saved_ve;
   /* and its associated helper structure for this module. */
   struct u_vbuf_elements *ve;

   /* Vertex elements used for the translate fallback. */
   struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
   /* If non-NULL, this is a vertex element state used for the translate
    * fallback and therefore used for rendering too. */
   void *fallback_ve;
   /* The vertex buffer slot index where translated vertices have been
    * stored in. */
   unsigned fallback_vbs[VB_NUM];
   /* When binding the fallback vertex element state, we don't want to
    * change saved_ve and ve. This is set to TRUE in such cases. */
   boolean ve_binding_lock;

   /* Whether there is any user buffer. */
   boolean any_user_vbs;
   /* Whether there is a buffer with a non-native layout. */
   boolean incompatible_vb_layout;
   /* Per-buffer flags. */
   boolean incompatible_vb[PIPE_MAX_ATTRIBS];
};

static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->b.caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}

struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
              unsigned upload_buffer_size,
              unsigned upload_buffer_alignment,
              unsigned upload_buffer_bind,
              enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv);

   mgr->pipe = pipe;
   mgr->translate_cache = translate_cache_create();
   memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->b.caps.fetch_dword_unaligned =
      fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_init_format_caps(mgr);

   return &mgr->b;
}

void u_vbuf_destroy(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}

static void
u_vbuf_translate_buffers(struct u_vbuf_priv *mgr,
                         struct translate_key *key,
                         unsigned vb_mask,
                         unsigned out_vb,
                         int start, unsigned count)
{
   struct translate *tr;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   uint8_t *out_map;
   unsigned i, out_offset;

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_mask & (1 << i)) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];
         unsigned offset = vb->buffer_offset + vb->stride * start;
         uint8_t *map;

         if (u_vbuf_resource(vb->buffer)->user_ptr) {
            map = u_vbuf_resource(vb->buffer)->user_ptr + offset;
         } else {
            unsigned size = vb->stride ? count * vb->stride
                                       : sizeof(double)*4;

            if (offset+size > vb->buffer->width0) {
               size = vb->buffer->width0 - offset;
            }

            map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size,
                                        PIPE_TRANSFER_READ, &vb_transfer[i]);
         }

         tr->set_buffer(tr, i, map, vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   u_upload_alloc(mgr->b.uploader,
                  key->output_stride * start,
                  key->output_stride * count,
                  &out_offset, &out_buffer,
                  (void**)&out_map);

   out_offset -= key->output_stride * start;

   /* Translate. */
   tr->run(tr, 0, count, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_transfer[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer. */
   mgr->b.real_vertex_buffer[out_vb].buffer_offset = out_offset;
   mgr->b.real_vertex_buffer[out_vb].stride = key->output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(
      &mgr->b.real_vertex_buffer[out_vb].buffer, NULL);
   mgr->b.real_vertex_buffer[out_vb].buffer = out_buffer;
}

static boolean
u_vbuf_translate_find_free_vb_slots(struct u_vbuf_priv *mgr,
                                    unsigned mask[VB_NUM])
{
   unsigned i, type;
   unsigned nr = mgr->ve->count;
   boolean used_vb[PIPE_MAX_ATTRIBS] = {0};
   unsigned fallback_vbs[VB_NUM];

   memset(fallback_vbs, ~0, sizeof(fallback_vbs));

   /* Mark used vertex buffers as... used. */
   for (i = 0; i < nr; i++) {
      if (!mgr->ve->incompatible_layout_elem[i]) {
         unsigned index = mgr->ve->ve[i].vertex_buffer_index;

         if (!mgr->incompatible_vb[index]) {
            used_vb[index] = TRUE;
         }
      }
   }

   /* Find free slots for each type if needed. */
   i = 0;
   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         for (; i < PIPE_MAX_ATTRIBS; i++) {
            if (!used_vb[i]) {
               /*printf("found slot=%i for type=%i\n", i, type);*/
               fallback_vbs[type] = i;
               i++;
               if (i > mgr->b.nr_real_vertex_buffers) {
                  mgr->b.nr_real_vertex_buffers = i;
               }
               break;
            }
         }
         if (i == PIPE_MAX_ATTRIBS) {
            /* fail, reset the number to its original value */
            mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
            return FALSE;
         }
      }
   }

   memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs));
   return TRUE;
}

static boolean
u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
                       int start_vertex, unsigned num_vertices,
                       int start_instance, unsigned num_instances)
{
   unsigned mask[VB_NUM] = {0};
   struct translate_key key[VB_NUM];
   unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... into key.elements */
   unsigned i, type;

   int start[VB_NUM] = {
      start_vertex,   /* VERTEX */
      start_instance, /* INSTANCE */
      0               /* CONST */
   };

   unsigned count[VB_NUM] = {
      num_vertices,  /* VERTEX */
      num_instances, /* INSTANCE */
      1              /* CONST */
   };

   memset(key, 0, sizeof(key));
   memset(elem_index, ~0, sizeof(elem_index));

   /* See if there are vertex attribs of each type to translate and
    * which ones. */
   for (i = 0; i < mgr->ve->count; i++) {
      unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;

      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[vb_index]) {
         continue;
      }

      if (!mgr->b.vertex_buffer[vb_index].stride) {
         mask[VB_CONST] |= 1 << vb_index;
      } else if (mgr->ve->ve[i].instance_divisor) {
         mask[VB_INSTANCE] |= 1 << vb_index;
      } else {
         mask[VB_VERTEX] |= 1 << vb_index;
      }
   }

   assert(mask[VB_VERTEX] || mask[VB_INSTANCE] || mask[VB_CONST]);

   /* Find free vertex buffer slots. */
   if (!u_vbuf_translate_find_free_vb_slots(mgr, mask)) {
      return FALSE;
   }

   /* Initialize the translate keys. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct translate_key *k;
      struct translate_element *te;
      unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;

      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[vb_index]) {
         continue;
      }

      /* Set type to what we will translate.
       * Whether vertex, instance, or constant attribs. */
      bit = 1 << vb_index;
      for (type = 0; type < VB_NUM; type++) {
         if (mask[type] & bit) {
            break;
         }
      }
      assert(type < VB_NUM);
      assert(translate_is_output_format_supported(mgr->ve->native_format[i]));
      /*printf("velem=%i type=%i\n", i, type);*/

      /* Add the vertex element. */
      k = &key[type];
      elem_index[type][i] = k->nr_elements;

      te = &k->element[k->nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = vb_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = mgr->ve->native_format[i];
      te->output_offset = k->output_stride;

      k->output_stride += mgr->ve->native_format_size[i];
      k->nr_elements++;
   }

   /* Translate buffers. */
   for (type = 0; type < VB_NUM; type++) {
      if (key[type].nr_elements) {
         u_vbuf_translate_buffers(mgr, &key[type], mask[type],
                                  mgr->fallback_vbs[type],
                                  start[type], count[type]);

         /* Fixup the stride for constant attribs. */
         if (type == VB_CONST) {
            mgr->b.real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
         }
      }
   }

   /* Setup new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      for (type = 0; type < VB_NUM; type++) {
         if (elem_index[type][i] < key[type].nr_elements) {
            struct translate_element *te = &key[type].element[elem_index[type][i]];
            mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            mgr->fallback_velems[i].src_format = te->output_format;
            mgr->fallback_velems[i].src_offset = te->output_offset;
            mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vbs[type];

            /* elem_index[type][i] can only be set for one type. */
            assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0);
            assert(type > VB_VERTEX || elem_index[type+2][i] == ~0);
            break;
         }
      }
      /* No translating, just copy the original vertex element over. */
      if (type == VB_NUM) {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   mgr->fallback_ve =
      mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                              mgr->fallback_velems);

   /* Preserve saved_ve. */
   mgr->ve_binding_lock = TRUE;
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->ve_binding_lock = FALSE;
   return TRUE;
}

static void u_vbuf_translate_end(struct u_vbuf_priv *mgr)
{
   unsigned i;

   if (mgr->fallback_ve == NULL) {
      return;
   }

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->fallback_ve = NULL;

   /* Unreference the now-unused VBOs. */
   for (i = 0; i < VB_NUM; i++) {
      unsigned vb = mgr->fallback_vbs[i];
      if (vb != ~0) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[vb].buffer, NULL);
         mgr->fallback_vbs[i] = ~0;
      }
   }
   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}

#define FORMAT_REPLACE(what, withwhat) \
   case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break

struct u_vbuf_elements *
u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
                              unsigned count,
                              const struct pipe_vertex_element *attribs,
                              struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);

   ve->count = count;

   if (!count) {
      return ve;
   }

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->b.caps.format_fixed32) {
         switch (format) {
            FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float16) {
         switch (format) {
            FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float64) {
         switch (format) {
            FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_norm32) {
         switch (format) {
            FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_scaled32) {
         switch (format) {
            FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
            default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout_elem[i] =
         ve->ve[i].src_format != ve->native_format[i] ||
         (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
      ve->incompatible_layout =
         ve->incompatible_layout ||
         ve->incompatible_layout_elem[i];
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->b.caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}

void u_vbuf_bind_vertex_elements(struct u_vbuf *mgrb,
                                 void *cso,
                                 struct u_vbuf_elements *ve)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (!cso) {
      return;
   }

   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}

void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr,
                                    struct u_vbuf_elements *ve)
{
   FREE(ve);
}

void u_vbuf_set_vertex_buffers(struct u_vbuf *mgrb,
                               unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;
   memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));

   if (!mgr->b.caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               mgr->incompatible_vb[i] = TRUE;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);

      mgr->b.real_vertex_buffer[i].buffer_offset =
         mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;

      mgr->b.real_vertex_buffer[i].stride =
         mgr->b.vertex_buffer[i].stride = vb->stride;

      if (!vb->buffer ||
          mgr->incompatible_vb[i]) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
   }

   for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}

void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
                             const struct pipe_index_buffer *ib)
{
   if (ib && ib->buffer) {
      assert(ib->offset % ib->index_size == 0);
      pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
      mgr->index_buffer.offset = ib->offset;
      mgr->index_buffer.index_size = ib->index_size;
   } else {
      pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
   }
}

static void
u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
                      int start_vertex, unsigned num_vertices,
                      int start_instance, unsigned num_instances)
{
   unsigned i;
   unsigned nr_velems = mgr->ve->count;
   unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
      unsigned instance_div, first, size;

      /* Skip the buffers generated by translate. */
      if (index == mgr->fallback_vbs[VB_VERTEX] ||
          index == mgr->fallback_vbs[VB_INSTANCE] ||
          index == mgr->fallback_vbs[VB_CONST]) {
         continue;
      }

      assert(vb->buffer);

      if (!u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned count = (num_instances + instance_div - 1) / instance_div;
         first += vb->stride * start_instance;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * start_vertex;
         size = vb->stride * (num_vertices - 1) + mgr->ve->src_format_size[i];
      }

      /* Update offsets. */
      if (!end_offset[index]) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }
   }

   /* Upload buffers. */
   for (i = 0; i < nr_vbufs; i++) {
      unsigned start, end = end_offset[i];
      struct pipe_vertex_buffer *real_vb;
      uint8_t *ptr;

      if (!end) {
         continue;
      }

      start = start_offset[i];
      assert(start < end);

      real_vb = &mgr->b.real_vertex_buffer[i];
      ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;

      u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer);

      real_vb->buffer_offset -= start;
   }
}

unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i, nr = mgr->ve->count;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned result = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
         &mgr->b.real_vertex_buffer[velems[i].vertex_buffer_index];
      unsigned size, max_count, value;

      /* We're not interested in constant and per-instance attribs. */
      if (!vb->buffer ||
          !vb->stride ||
          velems[i].instance_divisor) {
         continue;
      }

      size = vb->buffer->width0;

      /* Subtract buffer_offset. */
      value = vb->buffer_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract src_offset. */
      value = velems[i].src_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract format_size. */
      value = mgr->ve->native_format_size[i];
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Compute the max count. */
      max_count = 1 + size / vb->stride;
      result = MIN2(result, max_count);
   }
   return result;
}

static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs don't need min/max_index. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->b.vertex_buffer[index];

      /* Constant attribs don't need min/max_index. */
      if (!vb->stride) {
         continue;
      }

      /* Per-vertex attribs need min/max_index. */
      if (u_vbuf_resource(vb->buffer)->user_ptr ||
          mgr->ve->incompatible_layout_elem[i] ||
          mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}

static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                                    struct pipe_index_buffer *ib,
                                    const struct pipe_draw_info *info,
                                    int *out_min_index,
                                    int *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;
   unsigned i;
   unsigned restart_index = info->restart_index;

   if (u_vbuf_resource(ib->buffer)->user_ptr) {
      indices = u_vbuf_resource(ib->buffer)->user_ptr +
                ib->offset + info->start * ib->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, ib->buffer,
                                      ib->offset + info->start * ib->index_size,
                                      info->count * ib->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   switch (ib->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max_ui = 0;
      unsigned min_ui = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] != restart_index) {
               if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
            if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
         }
      }
      *out_min_index = min_ui;
      *out_max_index = max_ui;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned max_us = 0;
      unsigned min_us = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] != restart_index) {
               if (us_indices[i] > max_us) max_us = us_indices[i];
               if (us_indices[i] < min_us) min_us = us_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] > max_us) max_us = us_indices[i];
            if (us_indices[i] < min_us) min_us = us_indices[i];
         }
      }
      *out_min_index = min_us;
      *out_max_index = max_us;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned max_ub = 0;
      unsigned min_ub = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] != restart_index) {
               if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
         }
      }
      *out_min_index = min_ub;
      *out_max_index = max_ub;
      break;
   }
   default:
      assert(0);
      *out_min_index = 0;
      *out_max_index = 0;
   }

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}

enum u_vbuf_return_flags
u_vbuf_draw_begin(struct u_vbuf *mgrb,
                  const struct pipe_draw_info *info)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   int start_vertex;
   unsigned num_vertices;

   if (!mgr->incompatible_vb_layout &&
       !mgr->ve->incompatible_layout &&
       !mgr->any_user_vbs) {
      return 0;
   }

   if (info->indexed) {
      int min_index, max_index;
      bool index_bounds_valid = false;

      if (info->max_index != ~0) {
         min_index = info->min_index;
         max_index = info->max_index;
         index_bounds_valid = true;
      } else if (u_vbuf_need_minmax_index(mgr)) {
         u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info,
                                 &min_index, &max_index);
         index_bounds_valid = true;
      }

      /* If the index bounds are valid, it means some upload or translation
       * of per-vertex attribs will be performed. */
      if (index_bounds_valid) {
         assert(min_index <= max_index);

         start_vertex = min_index + info->index_bias;
         num_vertices = max_index + 1 - min_index;
      } else {
         /* Nothing to do for per-vertex attribs. */
         start_vertex = 0;
         num_vertices = 0;
         min_index = 0;
      }
   } else {
      start_vertex = info->start;
      num_vertices = info->count;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) {
      /* XXX check the return value */
      u_vbuf_translate_begin(mgr, start_vertex, num_vertices,
                             info->start_instance, info->instance_count);
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
                            info->start_instance, info->instance_count);
   }

   /*unsigned i;
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      printf("input %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->b.vertex_buffer+i);
      printf("\n");
   }
   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      printf("real %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->b.real_vertex_buffer+i);
      printf("\n");
   }*/

   return U_VBUF_BUFFERS_UPDATED;
}

void u_vbuf_draw_end(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}
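
/*
 * Illustrative usage sketch (not part of the original file): how a Gallium
 * driver's draw_vbo hook might drive this module.  "my_context",
 * "my_update_vertex_buffers" and "my_emit_draw" are hypothetical placeholder
 * names; only the u_vbuf_* entry points and U_VBUF_BUFFERS_UPDATED come from
 * this module's API as seen above.  Kept in #if 0 so it has no effect on the
 * build.
 */
#if 0
struct my_context {
   struct pipe_context base;
   struct u_vbuf *vbuf; /* created with u_vbuf_create() at context init */
};

static void my_draw_vbo(struct pipe_context *pipe,
                        const struct pipe_draw_info *info)
{
   struct my_context *ctx = (struct my_context*)pipe;

   /* Translate incompatible layouts and upload user buffers if needed. */
   if (u_vbuf_draw_begin(ctx->vbuf, info) & U_VBUF_BUFFERS_UPDATED) {
      /* Re-emit vertex buffer state from ctx->vbuf->real_vertex_buffer. */
      my_update_vertex_buffers(ctx);
   }

   my_emit_draw(ctx, info); /* the driver's actual draw goes here */

   /* Restore the saved vertex elements and release fallback buffers. */
   u_vbuf_draw_end(ctx->vbuf);
}
#endif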