/* u_vbuf.c, revision dbd60d27e8087a3bacf36d4eceef15dc4fcdccee */
/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_vbuf.h"

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"
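
/*
 * Informal overview (added for readability, not part of the original
 * comments): this module keeps two views of the vertex buffer state.
 * b.vertex_buffer[] holds what the state tracker set, while
 * b.real_vertex_buffer[] holds what should actually be bound to the driver:
 * the original resource, an uploaded copy of a user buffer, or the output of
 * the translate fallback.  The same split exists for vertex elements:
 * saved_ve is the CSO bound by the state tracker and fallback_ve, when
 * non-NULL, is the temporary CSO used while the translate fallback is
 * active.  Drivers typically create the manager with u_vbuf_create() and
 * route their vertex buffer / vertex element state through
 * u_vbuf_set_vertex_buffers() and u_vbuf_create_vertex_elements() /
 * u_vbuf_bind_vertex_elements().
 */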

struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
   /* Per-element flags. */
   boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];
};

struct u_vbuf_priv {
   struct u_vbuf b;
   struct pipe_context *pipe;
   struct translate_cache *translate_cache;

   /* Vertex element state bound by the state tracker. */
   void *saved_ve;
   /* and its associated helper structure for this module. */
   struct u_vbuf_elements *ve;

   /* Vertex elements used for the translate fallback. */
   struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
   /* If non-NULL, this is a vertex element state used for the translate
    * fallback and therefore used for rendering too. */
   void *fallback_ve;
   /* The vertex buffer slot index where translated vertices have been
    * stored. */
   unsigned fallback_vb_slot;
   /* When binding the fallback vertex element state, we don't want to
    * change saved_ve and ve. This is set to TRUE in such cases. */
   boolean ve_binding_lock;

   /* Whether there is any user buffer. */
   boolean any_user_vbs;
   /* Whether there is a buffer with a non-native layout. */
   boolean incompatible_vb_layout;
   /* Per-buffer flags. */
   boolean incompatible_vb[PIPE_MAX_ATTRIBS];
};

static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->b.caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}

struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
              unsigned upload_buffer_size,
              unsigned upload_buffer_alignment,
              unsigned upload_buffer_bind,
              enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv);

   mgr->pipe = pipe;
   mgr->translate_cache = translate_cache_create();
   mgr->fallback_vb_slot = ~0;

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->b.caps.fetch_dword_unaligned =
      fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_init_format_caps(mgr);

   return &mgr->b;
}

void u_vbuf_destroy(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}


static unsigned u_vbuf_get_free_real_vb_slot(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;
   boolean used_vb[PIPE_MAX_ATTRIBS] = {0};

   for (i = 0; i < nr; i++) {
      if (!mgr->ve->incompatible_layout_elem[i]) {
         unsigned index = mgr->ve->ve[i].vertex_buffer_index;

         if (!mgr->incompatible_vb[index]) {
            used_vb[index] = TRUE;
         }
      }
   }

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
      if (!used_vb[i]) {
         if (i >= mgr->b.nr_real_vertex_buffers) {
            mgr->b.nr_real_vertex_buffers = i+1;
         }
         return i;
      }
   }
   return ~0;
}

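/*
 * Worked example (illustrative only, with made-up formats): suppose two
 * vertex elements need the fallback, one whose native format is
 * R32G32B32_FLOAT (12 bytes) and one whose native format is
 * R32G32B32A32_FLOAT (16 bytes).  The element loop in
 * u_vbuf_translate_begin() below packs them back to back in the translate
 * output, so the first gets output_offset 0, the second output_offset 12,
 * and key.output_stride ends up as 28, which becomes the stride of the
 * temporary vertex buffer.
 */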
static void
u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
                       int min_index, int max_index)
{
   struct translate_key key;
   struct translate_element *te;
   unsigned tr_elem_index[PIPE_MAX_ATTRIBS];
   struct translate *tr;
   boolean vb_translated[PIPE_MAX_ATTRIBS] = {0};
   uint8_t *out_map;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   unsigned i, out_offset, num_verts = max_index + 1 - min_index;

   memset(&key, 0, sizeof(key));
   memset(tr_elem_index, 0xff, sizeof(tr_elem_index));

   /* Get a new vertex buffer slot. */
   mgr->fallback_vb_slot = u_vbuf_get_free_real_vb_slot(mgr);

   if (mgr->fallback_vb_slot == ~0) {
      return; /* XXX error, not enough attribs */
   }

   /* Initialize the description of how vertices should be translated. */
   for (i = 0; i < mgr->ve->count; i++) {
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned output_format_size = mgr->ve->native_format_size[i];

      /* Check for support. */
      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[mgr->ve->ve[i].vertex_buffer_index]) {
         continue;
      }

      assert(translate_is_output_format_supported(output_format));

      /* Add this vertex element. */
      te = &key.element[key.nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = mgr->ve->ve[i].vertex_buffer_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = key.output_stride;

      key.output_stride += output_format_size;
      vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE;
      tr_elem_index[i] = key.nr_elements;
      key.nr_elements++;
   }

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, &key);

   /* Map the buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];
         unsigned offset = vb->buffer_offset + vb->stride * min_index;
         unsigned size = vb->stride ? num_verts * vb->stride
                                    : vb->buffer->width0 - offset;
         uint8_t *map;

         if (u_vbuf_resource(vb->buffer)->user_ptr) {
            map = u_vbuf_resource(vb->buffer)->user_ptr + offset;
         } else {
            map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size,
                                        PIPE_TRANSFER_READ, &vb_transfer[i]);
         }

         tr->set_buffer(tr, i, map, vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   u_upload_alloc(mgr->b.uploader,
                  key.output_stride * min_index,
                  key.output_stride * num_verts,
                  &out_offset, &out_buffer,
                  (void**)&out_map);

   out_offset -= key.output_stride * min_index;

   /* Translate. */
   tr->run(tr, 0, num_verts, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_transfer[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Set up the new vertex buffer. */
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer_offset = out_offset;
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].stride = key.output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(
      &mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer, NULL);
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer = out_buffer;
   out_buffer = NULL;
   /* Set up the new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      if (tr_elem_index[i] < key.nr_elements) {
         te = &key.element[tr_elem_index[i]];
         mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
         mgr->fallback_velems[i].src_format = te->output_format;
         mgr->fallback_velems[i].src_offset = te->output_offset;
         mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vb_slot;
      } else {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   mgr->fallback_ve =
      mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                              mgr->fallback_velems);

   /* Preserve saved_ve. */
   mgr->ve_binding_lock = TRUE;
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->ve_binding_lock = FALSE;
}

static void u_vbuf_translate_end(struct u_vbuf_priv *mgr)
{
   if (mgr->fallback_ve == NULL) {
      return;
   }

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->fallback_ve = NULL;

   /* Delete the now-unused VBO. */
   pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer,
                           NULL);
   mgr->fallback_vb_slot = ~0;
   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}

#define FORMAT_REPLACE(what, withwhat) \
    case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break

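/*
 * For example, FORMAT_REPLACE(R32_FIXED, R32_FLOAT) expands to:
 *    case PIPE_FORMAT_R32_FIXED: format = PIPE_FORMAT_R32_FLOAT; break;
 * which is how the switches in u_vbuf_create_vertex_elements() below pick a
 * supported replacement format.
 */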
struct u_vbuf_elements *
u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
                              unsigned count,
                              const struct pipe_vertex_element *attribs,
                              struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);

   ve->count = count;

   if (!count) {
      return ve;
   }

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->b.caps.format_fixed32) {
         switch (format) {
         FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->b.caps.format_float16) {
         switch (format) {
         FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
         FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
         FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->b.caps.format_float64) {
         switch (format) {
         FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
         FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
         FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->b.caps.format_norm32) {
         switch (format) {
         FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
         FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->b.caps.format_scaled32) {
         switch (format) {
         FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
         FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);
         FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
         FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
         default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout_elem[i] =
         ve->ve[i].src_format != ve->native_format[i] ||
         (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
      ve->incompatible_layout =
         ve->incompatible_layout ||
         ve->incompatible_layout_elem[i];
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->b.caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}

void u_vbuf_bind_vertex_elements(struct u_vbuf *mgrb,
                                 void *cso,
                                 struct u_vbuf_elements *ve)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (!cso) {
      return;
   }

   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}

void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr,
                                    struct u_vbuf_elements *ve)
{
   FREE(ve);
}

void u_vbuf_set_vertex_buffers(struct u_vbuf *mgrb,
                               unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;
   memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));

   if (!mgr->b.caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               mgr->incompatible_vb[i] = TRUE;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);

      mgr->b.real_vertex_buffer[i].buffer_offset =
         mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;

      mgr->b.real_vertex_buffer[i].stride =
         mgr->b.vertex_buffer[i].stride = vb->stride;

      if (!vb->buffer ||
          mgr->incompatible_vb[i]) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
   }

   for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}

void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
                             const struct pipe_index_buffer *ib)
{
   if (ib && ib->buffer) {
      assert(ib->offset % ib->index_size == 0);
      pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
      mgr->index_buffer.offset = ib->offset;
      mgr->index_buffer.index_size = ib->index_size;
   } else {
      pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
   }
}

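/*
 * Worked example (illustrative only, with made-up numbers): for a per-vertex
 * attrib with buffer_offset 0, src_offset 4, stride 16 and src_format_size 8,
 * a draw with min_index 2 and max_index 5 gives count = 4,
 * first = 4 + 16*2 = 36 and size = 16*(4-1) + 8 = 56, so bytes [36, 92) of
 * the user buffer are uploaded by the function below.
 */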
static void
u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
                      int min_index, int max_index,
                      unsigned start_instance, unsigned instance_count)
{
   unsigned i;
   unsigned count = max_index + 1 - min_index;
   unsigned nr_velems = mgr->ve->count;
   unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
      unsigned instance_div, first, size;

      /* Skip the buffer generated by translate. */
      if (index == mgr->fallback_vb_slot) {
         continue;
      }

      assert(vb->buffer);

      if (!u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned count = (instance_count + instance_div - 1) / instance_div;
         first += vb->stride * start_instance;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * min_index;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      }

      /* Update offsets. */
      if (!end_offset[index]) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }
   }

   /* Upload buffers. */
   for (i = 0; i < nr_vbufs; i++) {
      unsigned start, end = end_offset[i];
      struct pipe_vertex_buffer *real_vb;
      uint8_t *ptr;

      if (!end) {
         continue;
      }

      start = start_offset[i];
      assert(start < end);

      real_vb = &mgr->b.real_vertex_buffer[i];
      ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;

      u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer);

      real_vb->buffer_offset -= start;
   }
}

unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i, nr = mgr->ve->count;
   struct pipe_vertex_element *velems =
      mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned result = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
         &mgr->b.real_vertex_buffer[velems[i].vertex_buffer_index];
      unsigned size, max_count, value;

      /* We're not interested in constant and per-instance attribs. */
      if (!vb->buffer ||
          !vb->stride ||
          velems[i].instance_divisor) {
         continue;
      }

      size = vb->buffer->width0;

      /* Subtract buffer_offset. */
      value = vb->buffer_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract src_offset. */
      value = velems[i].src_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract format_size. */
      value = mgr->ve->native_format_size[i];
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Compute the max count. */
      max_count = 1 + size / vb->stride;
      result = MIN2(result, max_count);
   }
   return result;
}

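/*
 * Example for u_vbuf_draw_max_vertex_count() above (illustrative only): with
 * width0 = 1000, buffer_offset = 0, src_offset = 0, native_format_size = 16
 * and stride = 16, the remaining size is 984, so max_count = 1 + 984/16 = 62
 * vertices can be fetched from that buffer.
 */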
static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs don't need min/max_index. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->b.vertex_buffer[index];

      /* Constant attribs don't need min/max_index. */
      if (!vb->stride) {
         continue;
      }

      /* Per-vertex attribs need min/max_index. */
      if (u_vbuf_resource(vb->buffer)->user_ptr ||
          mgr->ve->incompatible_layout_elem[i] ||
          mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}

static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                                    struct pipe_index_buffer *ib,
                                    const struct pipe_draw_info *info,
                                    int *out_min_index,
                                    int *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;
   unsigned i;
   unsigned restart_index = info->restart_index;

   if (u_vbuf_resource(ib->buffer)->user_ptr) {
      indices = u_vbuf_resource(ib->buffer)->user_ptr +
                ib->offset + info->start * ib->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, ib->buffer,
                                      ib->offset + info->start * ib->index_size,
                                      info->count * ib->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   switch (ib->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max_ui = 0;
      unsigned min_ui = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] != restart_index) {
               if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
            if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
         }
      }
      *out_min_index = min_ui;
      *out_max_index = max_ui;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned max_us = 0;
      unsigned min_us = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] != restart_index) {
               if (us_indices[i] > max_us) max_us = us_indices[i];
               if (us_indices[i] < min_us) min_us = us_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] > max_us) max_us = us_indices[i];
            if (us_indices[i] < min_us) min_us = us_indices[i];
         }
      }
      *out_min_index = min_us;
      *out_max_index = max_us;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned max_ub = 0;
      unsigned min_ub = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] != restart_index) {
               if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
         }
      }
      *out_min_index = min_ub;
      *out_max_index = max_ub;
      break;
   }
   default:
      assert(0);
      *out_min_index = 0;
      *out_max_index = 0;
   }

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}

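/*
 * Typical draw-time usage (a sketch, not mandated by this file): the driver
 * calls u_vbuf_draw_begin() before emitting a draw; if the returned flags
 * contain U_VBUF_BUFFERS_UPDATED, it re-binds the buffers from
 * b.real_vertex_buffer[], which now point at uploaded or translated data,
 * then draws and finally calls u_vbuf_draw_end().
 */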
enum u_vbuf_return_flags
u_vbuf_draw_begin(struct u_vbuf *mgrb,
                  const struct pipe_draw_info *info)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   int min_index, max_index;

   if (!mgr->incompatible_vb_layout &&
       !mgr->ve->incompatible_layout &&
       !mgr->any_user_vbs) {
      return 0;
   }

   if (info->indexed) {
      if (info->max_index != ~0) {
         min_index = info->min_index + info->index_bias;
         max_index = info->max_index + info->index_bias;
      } else if (u_vbuf_need_minmax_index(mgr)) {
         u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info,
                                 &min_index, &max_index);
         min_index += info->index_bias;
         max_index += info->index_bias;
      } else {
         min_index = 0;
         max_index = 0;
      }
   } else {
      min_index = info->start;
      max_index = info->start + info->count - 1;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) {
      u_vbuf_translate_begin(mgr, min_index, max_index);
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      u_vbuf_upload_buffers(mgr, min_index, max_index,
                            info->start_instance, info->instance_count);
   }
   return U_VBUF_BUFFERS_UPDATED;
}

void u_vbuf_draw_end(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}