u_vbuf.c revision de93347d482a96f88c898622c9620f03e677e386
/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_vbuf.h"

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"

struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This may mean either of two things:
    * - src_format != native_format, as discussed above, or
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
   /* Per-element flags. */
   boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];
};

struct u_vbuf_priv {
   struct u_vbuf b;
   struct pipe_context *pipe;
   struct translate_cache *translate_cache;

   /* Vertex element state bound by the state tracker. */
   void *saved_ve;
   /* ...and its associated helper structure for this module. */
   struct u_vbuf_elements *ve;

   /* Vertex elements used for the translate fallback. */
   struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
   /* If non-NULL, this is the vertex element state used for the translate
    * fallback and therefore also for rendering. */
   void *fallback_ve;
   /* The vertex buffer slot index where the translated vertices are
    * stored. */
   unsigned fallback_vb_slot;
   /* When binding the fallback vertex element state, we don't want to
    * change saved_ve and ve. This is set to TRUE in such cases. */
   boolean ve_binding_lock;

   /* Whether there is any user buffer. */
   boolean any_user_vbs;
   /* Whether there is a buffer with a non-native layout. */
   boolean incompatible_vb_layout;
   /* Per-buffer flags. */
   boolean incompatible_vb[PIPE_MAX_ATTRIBS];
};
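/* Overview: u_vbuf sits between the state tracker and a pipe driver and
 * rewrites vertex buffer/element state the hardware cannot consume directly
 * (unsupported formats, misaligned offsets, user-pointer buffers).  A rough,
 * hypothetical sketch of how a driver might wire it into its draw hook (the
 * driver_* names below are illustrative, not part of this module):
 *
 *    // in the driver's draw_vbo callback:
 *    //    if (u_vbuf_draw_begin(mgr, info) & U_VBUF_BUFFERS_UPDATED) {
 *    //       driver_set_vertex_buffers(ctx, mgr->nr_real_vertex_buffers,
 *    //                                 mgr->real_vertex_buffer);
 *    //    }
 *    //    ... emit the draw ...
 *    //    u_vbuf_draw_end(mgr);
 */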
static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->b.caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}

struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
              unsigned upload_buffer_size,
              unsigned upload_buffer_alignment,
              unsigned upload_buffer_bind,
              enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv);

   mgr->pipe = pipe;
   mgr->translate_cache = translate_cache_create();
   mgr->fallback_vb_slot = ~0;

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->b.caps.fetch_dword_unaligned =
      fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_init_format_caps(mgr);

   return &mgr->b;
}

void u_vbuf_destroy(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}


static unsigned u_vbuf_get_free_real_vb_slot(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;
   boolean used_vb[PIPE_MAX_ATTRIBS] = {0};

   for (i = 0; i < nr; i++) {
      if (!mgr->ve->incompatible_layout_elem[i]) {
         unsigned index = mgr->ve->ve[i].vertex_buffer_index;

         if (!mgr->incompatible_vb[index]) {
            used_vb[index] = TRUE;
         }
      }
   }

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
      if (!used_vb[i]) {
         if (i >= mgr->b.nr_real_vertex_buffers) {
            mgr->b.nr_real_vertex_buffers = i+1;
         }
         return i;
      }
   }
   return ~0;
}
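/* The translate fallback below gathers all attributes whose format or layout
 * the hardware cannot fetch, converts them with the translate module into a
 * single tightly packed, hardware-friendly vertex stream, and binds that
 * stream in the free slot found above.  Conceptually, each bad element adds
 * one entry to the translate key:
 *
 *    (input_buffer, input_format, input_offset)
 *       -> (output_format, output_offset within key.output_stride)
 */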
static void
u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
                       int min_index, int max_index)
{
   struct translate_key key;
   struct translate_element *te;
   unsigned tr_elem_index[PIPE_MAX_ATTRIBS];
   struct translate *tr;
   boolean vb_translated[PIPE_MAX_ATTRIBS] = {0};
   uint8_t *out_map;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   unsigned i, num_verts, out_offset;
   boolean upload_flushed = FALSE;

   memset(&key, 0, sizeof(key));
   memset(tr_elem_index, 0xff, sizeof(tr_elem_index));

   /* Get a new vertex buffer slot. */
   mgr->fallback_vb_slot = u_vbuf_get_free_real_vb_slot(mgr);

   if (mgr->fallback_vb_slot == ~0) {
      return; /* XXX error: no free vertex buffer slot */
   }

   /* Initialize the description of how vertices should be translated. */
   for (i = 0; i < mgr->ve->count; i++) {
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned output_format_size = mgr->ve->native_format_size[i];

      /* Skip elements the hardware can fetch as-is. */
      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[mgr->ve->ve[i].vertex_buffer_index]) {
         continue;
      }

      /* Workaround for translate: output floats instead of half-floats. */
      switch (output_format) {
      case PIPE_FORMAT_R16_FLOAT:
         output_format = PIPE_FORMAT_R32_FLOAT;
         output_format_size = 4;
         break;
      case PIPE_FORMAT_R16G16_FLOAT:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         output_format_size = 8;
         break;
      case PIPE_FORMAT_R16G16B16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         output_format_size = 12;
         break;
      case PIPE_FORMAT_R16G16B16A16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         output_format_size = 16;
         break;
      default:;
      }

      /* Add this vertex element. */
      te = &key.element[key.nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = mgr->ve->ve[i].vertex_buffer_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = key.output_stride;

      key.output_stride += output_format_size;
      vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE;
      tr_elem_index[i] = key.nr_elements;
      key.nr_elements++;
   }

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, &key);

   /* Map the buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];

         uint8_t *map = pipe_buffer_map(mgr->pipe, vb->buffer,
                                        PIPE_TRANSFER_READ, &vb_transfer[i]);

         tr->set_buffer(tr, i,
                        map + vb->buffer_offset + vb->stride * min_index,
                        vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   num_verts = max_index + 1 - min_index;

   u_upload_alloc(mgr->b.uploader,
                  key.output_stride * min_index,
                  key.output_stride * num_verts,
                  &out_offset, &out_buffer, &upload_flushed,
                  (void**)&out_map);

   out_offset -= key.output_stride * min_index;

   /* Translate. */
   tr->run(tr, 0, num_verts, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Set up the new vertex buffer. */
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer_offset = out_offset;
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].stride = key.output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(
      &mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer, NULL);
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer = out_buffer;
   out_buffer = NULL;

   /* Set up the new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      if (tr_elem_index[i] < key.nr_elements) {
         te = &key.element[tr_elem_index[i]];
         mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
         mgr->fallback_velems[i].src_format = te->output_format;
         mgr->fallback_velems[i].src_offset = te->output_offset;
         mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vb_slot;
      } else {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   mgr->fallback_ve =
      mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                              mgr->fallback_velems);

   /* Preserve saved_ve. */
   mgr->ve_binding_lock = TRUE;
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->ve_binding_lock = FALSE;
}
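/* Note: every successful u_vbuf_translate_begin() is paired with the
 * u_vbuf_translate_end() below (driven from u_vbuf_draw_end()); it restores
 * the saved vertex elements and releases the temporary output VBO. */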
static void u_vbuf_translate_end(struct u_vbuf_priv *mgr)
{
   if (mgr->fallback_ve == NULL) {
      return;
   }

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->fallback_ve = NULL;

   /* Delete the now-unused VBO. */
   pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer,
                           NULL);
   mgr->fallback_vb_slot = ~0;
   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}

#define FORMAT_REPLACE(what, withwhat) \
    case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break

struct u_vbuf_elements *
u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
                              unsigned count,
                              const struct pipe_vertex_element *attribs,
                              struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);

   ve->count = count;

   if (!count) {
      return ve;
   }

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment; that is sorted out
       * below. */
      if (!mgr->b.caps.format_fixed32) {
         switch (format) {
            FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float16) {
         switch (format) {
            FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float64) {
         switch (format) {
            FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_norm32) {
         switch (format) {
            FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_scaled32) {
         switch (format) {
            FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
            default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout_elem[i] =
         ve->ve[i].src_format != ve->native_format[i] ||
         (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
      ve->incompatible_layout =
         ve->incompatible_layout ||
         ve->incompatible_layout_elem[i];
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->b.caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}
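/* Example: on a screen without format_float16 support, an application
 * binding a PIPE_FORMAT_R16G16_FLOAT attribute ends up with
 * native_format[i] = PIPE_FORMAT_R32G32_FLOAT and
 * incompatible_layout_elem[i] = TRUE, so the draw path translates that
 * attribute through the fallback above instead of fetching it directly. */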
void u_vbuf_bind_vertex_elements(struct u_vbuf *mgrb,
                                 void *cso,
                                 struct u_vbuf_elements *ve)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (!cso) {
      return;
   }

   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}

void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr,
                                    struct u_vbuf_elements *ve)
{
   FREE(ve);
}

void u_vbuf_set_vertex_buffers(struct u_vbuf *mgrb,
                               unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;
   memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));

   if (!mgr->b.caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               mgr->incompatible_vb[i] = TRUE;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);

      mgr->b.real_vertex_buffer[i].buffer_offset =
         mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;

      mgr->b.real_vertex_buffer[i].stride =
         mgr->b.vertex_buffer[i].stride = vb->stride;

      if (!vb->buffer ||
          mgr->incompatible_vb[i]) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
   }

   for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}
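/* The two arrays diverge on purpose: vertex_buffer[] mirrors what the state
 * tracker bound, while real_vertex_buffer[] holds what the hardware will
 * actually fetch from.  Slots that need uploading or translation keep a NULL
 * real buffer here and are filled in by the draw-time fallbacks below. */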
void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
                             const struct pipe_index_buffer *ib)
{
   if (ib && ib->buffer) {
      assert(ib->offset % ib->index_size == 0);
      pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
      mgr->index_buffer.offset = ib->offset;
      mgr->index_buffer.index_size = ib->index_size;
   } else {
      pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
   }
}

static void
u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
                      int min_index, int max_index,
                      unsigned instance_count)
{
   unsigned i;
   unsigned count = max_index + 1 - min_index;
   unsigned nr_velems = mgr->ve->count;
   unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
      unsigned instance_div, first, size;

      /* Skip the buffer generated by translate. */
      if (index == mgr->fallback_vb_slot) {
         continue;
      }

      assert(vb->buffer);

      if (!u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned count = (instance_count + instance_div - 1) / instance_div;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * min_index;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      }

      /* Update the upload range of this buffer. */
      if (!end_offset[index]) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }
   }

   /* Upload the buffers. */
   for (i = 0; i < nr_vbufs; i++) {
      unsigned start, end = end_offset[i];
      boolean flushed;
      struct pipe_vertex_buffer *real_vb;
      uint8_t *ptr;

      if (!end) {
         continue;
      }

      start = start_offset[i];
      assert(start < end);

      real_vb = &mgr->b.real_vertex_buffer[i];
      ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;

      u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer, &flushed);

      real_vb->buffer_offset -= start;
   }
}
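/* Worked example of the range computation above: a per-vertex attrib with
 * stride 16 and src_format_size 12, drawn with min_index 10 and max_index
 * 109 (count = 100), uploads the range starting at
 * buffer_offset + src_offset + 16*10 and spanning 16*99 + 12 bytes; the
 * last vertex contributes only its own format size, not a full stride. */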
unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i, nr = mgr->ve->count;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned result = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
         &mgr->b.real_vertex_buffer[velems[i].vertex_buffer_index];
      unsigned size, max_count, value;

      /* We're not interested in constant and per-instance attribs. */
      if (!vb->buffer ||
          !vb->stride ||
          velems[i].instance_divisor) {
         continue;
      }

      size = vb->buffer->width0;

      /* Subtract buffer_offset. */
      value = vb->buffer_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract src_offset. */
      value = velems[i].src_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract format_size. */
      value = mgr->ve->native_format_size[i];
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Compute the max count. */
      max_count = 1 + size / vb->stride;
      result = MIN2(result, max_count);
   }
   return result;
}

static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs don't need min/max_index. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->b.vertex_buffer[index];

      /* Constant attribs don't need min/max_index. */
      if (!vb->stride) {
         continue;
      }

      /* Per-vertex attribs need min/max_index if their buffer must be
       * uploaded or translated. */
      if (u_vbuf_resource(vb->buffer)->user_ptr ||
          mgr->ve->incompatible_layout_elem[i] ||
          mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}
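/* Computing min/max_index requires reading every index in the function
 * below, which can be expensive for large index buffers.  That is why
 * u_vbuf_draw_begin() first uses info->min_index/max_index when the state
 * tracker supplies them, and only falls back to this scan when a fallback
 * actually needs the referenced vertex range. */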
static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                                    struct pipe_index_buffer *ib,
                                    const struct pipe_draw_info *info,
                                    int *out_min_index,
                                    int *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;
   unsigned i;
   unsigned restart_index = info->restart_index;

   if (u_vbuf_resource(ib->buffer)->user_ptr) {
      indices = u_vbuf_resource(ib->buffer)->user_ptr +
                ib->offset + info->start * ib->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, ib->buffer,
                                      ib->offset + info->start * ib->index_size,
                                      info->count * ib->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   switch (ib->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max_ui = 0;
      unsigned min_ui = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] != restart_index) {
               if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
            if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
         }
      }
      *out_min_index = min_ui;
      *out_max_index = max_ui;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned max_us = 0;
      unsigned min_us = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] != restart_index) {
               if (us_indices[i] > max_us) max_us = us_indices[i];
               if (us_indices[i] < min_us) min_us = us_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] > max_us) max_us = us_indices[i];
            if (us_indices[i] < min_us) min_us = us_indices[i];
         }
      }
      *out_min_index = min_us;
      *out_max_index = max_us;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned max_ub = 0;
      unsigned min_ub = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] != restart_index) {
               if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
         }
      }
      *out_min_index = min_ub;
      *out_max_index = max_ub;
      break;
   }
   default:
      assert(0);
      *out_min_index = 0;
      *out_max_index = 0;
   }

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}
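/* Note: with primitive restart enabled, indices equal to
 * info->restart_index do not reference vertex data, so the scans above skip
 * them when computing the range. */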
enum u_vbuf_return_flags
u_vbuf_draw_begin(struct u_vbuf *mgrb,
                  const struct pipe_draw_info *info)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   int min_index, max_index;

   if (!mgr->incompatible_vb_layout &&
       !mgr->ve->incompatible_layout &&
       !mgr->any_user_vbs) {
      return 0;
   }

   if (info->indexed) {
      if (info->max_index != ~0) {
         min_index = info->min_index + info->index_bias;
         max_index = info->max_index + info->index_bias;
      } else if (u_vbuf_need_minmax_index(mgr)) {
         u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info,
                                 &min_index, &max_index);
         min_index += info->index_bias;
         max_index += info->index_bias;
      } else {
         min_index = 0;
         max_index = 0;
      }
   } else {
      min_index = info->start;
      max_index = info->start + info->count - 1;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) {
      u_vbuf_translate_begin(mgr, min_index, max_index);
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      u_vbuf_upload_buffers(mgr, min_index, max_index, info->instance_count);
   }
   return U_VBUF_BUFFERS_UPDATED;
}

void u_vbuf_draw_end(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}