r300_render.c revision fff5be8e7b4557c221f2425dcafc2e7cbbba76ba
/*
 * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

/* r300_render: Vertex and index buffer primitive emission. Contains both
 * HW TCL fastpath rendering, and SW TCL Draw-assisted rendering. */

#include "draw/draw_context.h"
#include "draw/draw_vbuf.h"

#include "util/u_inlines.h"

#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"

#include "r300_cs.h"
#include "r300_context.h"
#include "r300_screen_buffer.h"
#include "r300_emit.h"
#include "r300_reg.h"
#include "r300_render.h"
#include "r300_state_derived.h"

/* r300_render: Vertex and index buffer primitive emission. */
#define R300_MAX_VBO_SIZE  (1024 * 1024)

/* XXX The DRM rejects VAP_ALT_NUM_VERTICES.. */
//#define ENABLE_ALT_NUM_VERTS

uint32_t r300_translate_primitive(unsigned prim)
{
    switch (prim) {
        case PIPE_PRIM_POINTS:
            return R300_VAP_VF_CNTL__PRIM_POINTS;
        case PIPE_PRIM_LINES:
            return R300_VAP_VF_CNTL__PRIM_LINES;
        case PIPE_PRIM_LINE_LOOP:
            return R300_VAP_VF_CNTL__PRIM_LINE_LOOP;
        case PIPE_PRIM_LINE_STRIP:
            return R300_VAP_VF_CNTL__PRIM_LINE_STRIP;
        case PIPE_PRIM_TRIANGLES:
            return R300_VAP_VF_CNTL__PRIM_TRIANGLES;
        case PIPE_PRIM_TRIANGLE_STRIP:
            return R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP;
        case PIPE_PRIM_TRIANGLE_FAN:
            return R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN;
        case PIPE_PRIM_QUADS:
            return R300_VAP_VF_CNTL__PRIM_QUADS;
        case PIPE_PRIM_QUAD_STRIP:
            return R300_VAP_VF_CNTL__PRIM_QUAD_STRIP;
        case PIPE_PRIM_POLYGON:
            return R300_VAP_VF_CNTL__PRIM_POLYGON;
        default:
            return 0;
    }
}

static uint32_t r300_provoking_vertex_fixes(struct r300_context *r300,
                                            unsigned mode)
{
    struct r300_rs_state* rs = (struct r300_rs_state*)r300->rs_state.state;
    uint32_t color_control = rs->color_control;

    /* By default (see r300_state.c:r300_create_rs_state) color_control is
     * initialized to provoking the first vertex.
     *
     * Triangle fans must be reduced to the second vertex, not the first, in
     * Gallium flatshade-first mode, as per the GL spec.
     * (http://www.opengl.org/registry/specs/ARB/provoking_vertex.txt)
     *
     * Quads never provoke correctly in flatshade-first mode. The first
     * vertex is never considered as provoking, so only the second, third,
     * and fourth vertices can be selected, and both "third" and "last" modes
     * select the fourth vertex. This is probably due to D3D lacking quads.
     *
     * Similarly, polygons reduce to the first, not the last, vertex, when in
     * "last" mode, and all other modes start from the second vertex.
     *
     * ~ C.
     */

    if (rs->rs.flatshade_first) {
        switch (mode) {
            case PIPE_PRIM_TRIANGLE_FAN:
                color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_SECOND;
                break;
            case PIPE_PRIM_QUADS:
            case PIPE_PRIM_QUAD_STRIP:
            case PIPE_PRIM_POLYGON:
                color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_LAST;
                break;
            default:
                color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_FIRST;
                break;
        }
    } else {
        color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_LAST;
    }

    return color_control;
}

static boolean immd_is_good_idea(struct r300_context *r300,
                                 unsigned count)
{
    return count <= 4;
}

static void r300_emit_draw_arrays_immediate(struct r300_context *r300,
                                            unsigned mode,
                                            unsigned start,
                                            unsigned count)
{
    struct pipe_vertex_element* velem;
    struct pipe_vertex_buffer* vbuf;
    unsigned vertex_element_count = r300->vertex_element_count;
    unsigned i, v, vbi, dw, elem_offset;

    /* Size of the vertex, in dwords. */
    unsigned vertex_size = 0;

    /* Offsets of the attribute, in dwords, from the start of the vertex. */
    unsigned offset[PIPE_MAX_ATTRIBS];

    /* Size of the vertex element, in dwords. */
    unsigned size[PIPE_MAX_ATTRIBS];

    /* Stride to the same attrib in the next vertex in the vertex buffer,
     * in dwords. */
    unsigned stride[PIPE_MAX_ATTRIBS] = {0};

    /* Mapped vertex buffers. */
    uint32_t* map[PIPE_MAX_ATTRIBS] = {0};

    CS_LOCALS(r300);

    /* Calculate the vertex size, offsets, strides etc. and map the buffers. */
    for (i = 0; i < vertex_element_count; i++) {
        velem = &r300->vertex_element[i];
        offset[i] = velem->src_offset / 4;
        size[i] = util_format_get_blocksize(velem->src_format) / 4;
        vertex_size += size[i];
        vbi = velem->vertex_buffer_index;

        /* Map the buffer. */
        if (!map[vbi]) {
            vbuf = &r300->vertex_buffer[vbi];
            map[vbi] = (uint32_t*)pipe_buffer_map(r300->context.screen,
                                                  vbuf->buffer,
                                                  PIPE_BUFFER_USAGE_CPU_READ);
            map[vbi] += vbuf->buffer_offset / 4;
            stride[vbi] = vbuf->stride / 4;
        }
    }

    r300_emit_dirty_state(r300);

    BEGIN_CS(10 + count * vertex_size);
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
               r300_provoking_vertex_fixes(r300, mode));
    OUT_CS_REG(R300_VAP_VTX_SIZE, vertex_size);
    OUT_CS_REG(R300_VAP_VF_MIN_VTX_INDX, 0);
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, count - 1);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_IMMD_2, count * vertex_size);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED | (count << 16) |
           r300_translate_primitive(mode));

    /* Emit vertices. */
    for (v = 0; v < count; v++) {
        for (i = 0; i < vertex_element_count; i++) {
            velem = &r300->vertex_element[i];
            vbi = velem->vertex_buffer_index;
            elem_offset = offset[i] + stride[vbi] * (v + start);

            for (dw = 0; dw < size[i]; dw++) {
                OUT_CS(map[vbi][elem_offset + dw]);
            }
        }
    }
    END_CS;

    /* Unmap buffers. */
    for (i = 0; i < vertex_element_count; i++) {
        vbi = r300->vertex_element[i].vertex_buffer_index;

        if (map[vbi]) {
            vbuf = &r300->vertex_buffer[vbi];
            pipe_buffer_unmap(r300->context.screen, vbuf->buffer);
            map[vbi] = NULL;
        }
    }
}

static void r300_emit_draw_arrays(struct r300_context *r300,
                                  unsigned mode,
                                  unsigned count)
{
#if defined(ENABLE_ALT_NUM_VERTS)
    boolean alt_num_verts = count > 65535;
#else
    boolean alt_num_verts = FALSE;
#endif
    CS_LOCALS(r300);

    if (alt_num_verts) {
        assert(count < (1 << 24));
        BEGIN_CS(10);
        OUT_CS_REG(R500_VAP_ALT_NUM_VERTICES, count);
    } else {
        BEGIN_CS(8);
    }
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
               r300_provoking_vertex_fixes(r300, mode));
    OUT_CS_REG(R300_VAP_VF_MIN_VTX_INDX, 0);
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, count - 1);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
           r300_translate_primitive(mode) |
           (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
    END_CS;
}

static void r300_emit_draw_elements(struct r300_context *r300,
                                    struct pipe_buffer* indexBuffer,
                                    unsigned indexSize,
                                    unsigned minIndex,
                                    unsigned maxIndex,
                                    unsigned mode,
                                    unsigned start,
                                    unsigned count)
{
    uint32_t count_dwords;
    uint32_t offset_dwords = indexSize * start / sizeof(uint32_t);
#if defined(ENABLE_ALT_NUM_VERTS)
    boolean alt_num_verts = count > 65535;
#else
    boolean alt_num_verts = FALSE;
#endif
    CS_LOCALS(r300);

    assert((start * indexSize) % 4 == 0);

    /* XXX Non-zero offset locks up. */
    if (offset_dwords != 0) {
        return;
    }

    if (alt_num_verts) {
        assert(count < (1 << 24));
        BEGIN_CS(16);
        OUT_CS_REG(R500_VAP_ALT_NUM_VERTICES, count);
    } else {
        BEGIN_CS(14);
    }
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
               r300_provoking_vertex_fixes(r300, mode));
    OUT_CS_REG(R300_VAP_VF_MIN_VTX_INDX, minIndex);
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, maxIndex);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 0);
    if (indexSize == 4) {
        count_dwords = count + start;
        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
               R300_VAP_VF_CNTL__INDEX_SIZE_32bit |
               r300_translate_primitive(mode) |
               (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
    } else {
        count_dwords = (count + start + 1) / 2;
        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
               r300_translate_primitive(mode) |
               (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
    }

    /* INDX_BUFFER is a truly special packet3.
     * Unlike most other packet3, where the offset is after the count,
     * the order is reversed, so the relocation ends up carrying the
     * size of the indexbuf instead of the offset.
     */
    OUT_CS_PKT3(R300_PACKET3_INDX_BUFFER, 2);
    OUT_CS(R300_INDX_BUFFER_ONE_REG_WR | (R300_VAP_PORT_IDX0 >> 2) |
           (0 << R300_INDX_BUFFER_SKIP_SHIFT));
    OUT_CS(offset_dwords << 2);
    OUT_CS_BUF_RELOC(indexBuffer, count_dwords,
                     RADEON_GEM_DOMAIN_GTT, 0, 0);

    END_CS;
}

static boolean r300_setup_vertex_buffers(struct r300_context *r300)
{
    struct pipe_vertex_buffer *vbuf = r300->vertex_buffer;
    struct pipe_vertex_element *velem = r300->vertex_element;
    struct pipe_buffer *pbuf;
    int ret;

    /* upload buffers first */
    if (r300->any_user_vbs) {
        ret = r300_upload_user_buffers(r300);
        r300->any_user_vbs = false;
    }

validate:
    for (int i = 0; i < r300->vertex_element_count; i++) {
        pbuf = vbuf[velem[i].vertex_buffer_index].buffer;

        if (!r300_add_buffer(r300->rws, pbuf,
                             RADEON_GEM_DOMAIN_GTT, 0)) {
            r300->context.flush(&r300->context, 0, NULL);
            goto validate;
        }
    }

    if (!r300->rws->validate(r300->rws)) {
        r300->context.flush(&r300->context, 0, NULL);
        return r300->rws->validate(r300->rws);
    }

    return TRUE;
}

static void r300_shorten_ubyte_elts(struct r300_context* r300,
                                    struct pipe_buffer** elts,
                                    unsigned count)
{
    struct pipe_screen* screen = r300->context.screen;
    struct pipe_buffer* new_elts;
    unsigned char *in_map;
    unsigned short *out_map;
    unsigned i;

    new_elts = screen->buffer_create(screen, 32,
                                     PIPE_BUFFER_USAGE_INDEX |
                                     PIPE_BUFFER_USAGE_CPU_WRITE |
                                     PIPE_BUFFER_USAGE_GPU_READ,
                                     2 * count);

    in_map = pipe_buffer_map(screen, *elts, PIPE_BUFFER_USAGE_CPU_READ);
    out_map = pipe_buffer_map(screen, new_elts, PIPE_BUFFER_USAGE_CPU_WRITE);

    for (i = 0; i < count; i++) {
        *out_map = (unsigned short)*in_map;
        in_map++;
        out_map++;
    }

    pipe_buffer_unmap(screen, *elts);
    pipe_buffer_unmap(screen, new_elts);

    *elts = new_elts;
}

/* This is the fast-path drawing & emission for HW TCL. */
void r300_draw_range_elements(struct pipe_context* pipe,
                              struct pipe_buffer* indexBuffer,
                              unsigned indexSize,
                              unsigned minIndex,
                              unsigned maxIndex,
                              unsigned mode,
                              unsigned start,
                              unsigned count)
{
    struct r300_context* r300 = r300_context(pipe);
    struct pipe_buffer* orgIndexBuffer = indexBuffer;
#if defined(ENABLE_ALT_NUM_VERTS)
    boolean alt_num_verts = r300_screen(pipe->screen)->caps->is_r500 &&
                            count > 65536;
#else
    boolean alt_num_verts = FALSE;
#endif
    unsigned short_count;

    if (!u_trim_pipe_prim(mode, &count)) {
        return;
    }

    r300_update_derived_state(r300);

    r300_emit_buffer_validate(r300);

    if (!r300_setup_vertex_buffers(r300)) {
        return;
    }

    if (indexSize == 1) {
        r300_shorten_ubyte_elts(r300, &indexBuffer, count);
        indexSize = 2;
    }

    r300_upload_index_buffer(r300, &indexBuffer,
                             indexSize, start, count);

    if (!r300_add_buffer(r300->rws, indexBuffer,
                         RADEON_GEM_DOMAIN_GTT, 0)) {
        goto cleanup;
    }

    if (!r300->rws->validate(r300->rws)) {
        goto cleanup;
    }

    u_upload_flush(r300->upload_vb);
    u_upload_flush(r300->upload_ib);
    r300_emit_dirty_state(r300);

    r300_emit_aos(r300, 0);

    if (alt_num_verts || count <= 65535) {
        r300_emit_draw_elements(r300, indexBuffer, indexSize, minIndex,
                                maxIndex, mode, start, count);
    } else {
        do {
            short_count = MIN2(count, 65534);
            r300_emit_draw_elements(r300, indexBuffer, indexSize, minIndex,
                                    maxIndex, mode, start, short_count);

            start += short_count;
            count -= short_count;
        } while (count);
    }

cleanup:
    if (indexBuffer != orgIndexBuffer) {
        pipe_buffer_reference( &indexBuffer, NULL );
    }
}

/* Simple helpers for context setup. Should probably be moved to util. */
void r300_draw_elements(struct pipe_context* pipe,
                        struct pipe_buffer* indexBuffer,
                        unsigned indexSize, unsigned mode,
                        unsigned start, unsigned count)
{
    pipe->draw_range_elements(pipe, indexBuffer, indexSize, 0, ~0,
                              mode, start, count);
}

void r300_draw_arrays(struct pipe_context* pipe, unsigned mode,
                      unsigned start, unsigned count)
{
    struct r300_context* r300 = r300_context(pipe);
#if defined(ENABLE_ALT_NUM_VERTS)
    boolean alt_num_verts = r300_screen(pipe->screen)->caps->is_r500 &&
                            count > 65536;
#else
    boolean alt_num_verts = FALSE;
#endif
    unsigned short_count;

    if (!u_trim_pipe_prim(mode, &count)) {
        return;
    }

    r300_update_derived_state(r300);

    r300_emit_buffer_validate(r300);

    if (immd_is_good_idea(r300, count)) {
        r300_emit_draw_arrays_immediate(r300, mode, start, count);
    } else {
        if (!r300_setup_vertex_buffers(r300)) {
            return;
        }

        u_upload_flush(r300->upload_vb);
        r300_emit_dirty_state(r300);

        if (alt_num_verts || count <= 65535) {
            r300_emit_aos(r300, start);
            r300_emit_draw_arrays(r300, mode, count);
        } else {
            do {
                short_count = MIN2(count, 65535);
                r300_emit_aos(r300, start);
                r300_emit_draw_arrays(r300, mode, short_count);

                start += short_count;
                count -= short_count;
            } while (count);
        }
    }
}

/****************************************************************************
 * The rest of this file is for SW TCL rendering only. Please be polite and *
 * keep these functions separated so that they are easier to locate. ~C.    *
 ***************************************************************************/

/* SW TCL arrays, using Draw. */
void r300_swtcl_draw_arrays(struct pipe_context* pipe,
                            unsigned mode,
                            unsigned start,
                            unsigned count)
{
    struct r300_context* r300 = r300_context(pipe);
    int i;

    if (!u_trim_pipe_prim(mode, &count)) {
        return;
    }

    for (i = 0; i < r300->vertex_buffer_count; i++) {
        void* buf = pipe_buffer_map(pipe->screen,
                                    r300->vertex_buffer[i].buffer,
                                    PIPE_BUFFER_USAGE_CPU_READ);
        draw_set_mapped_vertex_buffer(r300->draw, i, buf);
    }

    draw_set_mapped_element_buffer(r300->draw, 0, NULL);

    draw_set_mapped_constant_buffer(r300->draw,
            PIPE_SHADER_VERTEX,
            0,
            r300->shader_constants[PIPE_SHADER_VERTEX].constants,
            r300->shader_constants[PIPE_SHADER_VERTEX].count *
                (sizeof(float) * 4));

    draw_arrays(r300->draw, mode, start, count);

    for (i = 0; i < r300->vertex_buffer_count; i++) {
        pipe_buffer_unmap(pipe->screen, r300->vertex_buffer[i].buffer);
        draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
    }
}

/* SW TCL elements, using Draw. */
void r300_swtcl_draw_range_elements(struct pipe_context* pipe,
                                    struct pipe_buffer* indexBuffer,
                                    unsigned indexSize,
                                    unsigned minIndex,
                                    unsigned maxIndex,
                                    unsigned mode,
                                    unsigned start,
                                    unsigned count)
{
    struct r300_context* r300 = r300_context(pipe);
    int i;
    void* indices;

    if (!u_trim_pipe_prim(mode, &count)) {
        return;
    }

    for (i = 0; i < r300->vertex_buffer_count; i++) {
        void* buf = pipe_buffer_map(pipe->screen,
                                    r300->vertex_buffer[i].buffer,
                                    PIPE_BUFFER_USAGE_CPU_READ);
        draw_set_mapped_vertex_buffer(r300->draw, i, buf);
    }

    indices = pipe_buffer_map(pipe->screen, indexBuffer,
                              PIPE_BUFFER_USAGE_CPU_READ);
    draw_set_mapped_element_buffer_range(r300->draw, indexSize,
                                         minIndex, maxIndex, indices);

    draw_set_mapped_constant_buffer(r300->draw,
            PIPE_SHADER_VERTEX,
            0,
            r300->shader_constants[PIPE_SHADER_VERTEX].constants,
            r300->shader_constants[PIPE_SHADER_VERTEX].count *
                (sizeof(float) * 4));

    draw_arrays(r300->draw, mode, start, count);

    for (i = 0; i < r300->vertex_buffer_count; i++) {
        pipe_buffer_unmap(pipe->screen, r300->vertex_buffer[i].buffer);
        draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
    }

    pipe_buffer_unmap(pipe->screen, indexBuffer);
    draw_set_mapped_element_buffer_range(r300->draw, 0, start,
                                         start + count - 1, NULL);
}

/* Object for rendering using Draw. */
struct r300_render {
    /* Parent class */
    struct vbuf_render base;

    /* Pipe context */
    struct r300_context* r300;

    /* Vertex information */
    size_t vertex_size;
    unsigned prim;
    unsigned hwprim;

    /* VBO */
    struct pipe_buffer* vbo;
    size_t vbo_size;
    size_t vbo_offset;
    size_t vbo_max_used;
    void * vbo_ptr;
};

static INLINE struct r300_render*
r300_render(struct vbuf_render* render)
{
    return (struct r300_render*)render;
}

static const struct vertex_info*
r300_render_get_vertex_info(struct vbuf_render* render)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;

    r300_update_derived_state(r300);

    return (struct vertex_info*)r300->vertex_format_state.state;
}

static boolean r300_render_allocate_vertices(struct vbuf_render* render,
                                             ushort vertex_size,
                                             ushort count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    struct pipe_screen* screen = r300->context.screen;
    size_t size = (size_t)vertex_size * (size_t)count;

    if (size + r300render->vbo_offset > r300render->vbo_size)
    {
        pipe_buffer_reference(&r300->vbo, NULL);
        r300render->vbo = pipe_buffer_create(screen,
                                             64,
                                             PIPE_BUFFER_USAGE_VERTEX,
                                             R300_MAX_VBO_SIZE);
        r300render->vbo_offset = 0;
        r300render->vbo_size = R300_MAX_VBO_SIZE;
    }

    r300render->vertex_size = vertex_size;
    r300->vbo = r300render->vbo;
    r300->vbo_offset = r300render->vbo_offset;

    return (r300render->vbo) ? TRUE : FALSE;
}

static void* r300_render_map_vertices(struct vbuf_render* render)
{
    struct r300_render* r300render = r300_render(render);
    struct pipe_screen* screen = r300render->r300->context.screen;

    r300render->vbo_ptr = pipe_buffer_map(screen, r300render->vbo,
                                          PIPE_BUFFER_USAGE_CPU_WRITE);

    return ((uint8_t*)r300render->vbo_ptr + r300render->vbo_offset);
}

static void r300_render_unmap_vertices(struct vbuf_render* render,
                                       ushort min,
                                       ushort max)
{
    struct r300_render* r300render = r300_render(render);
    struct pipe_screen* screen = r300render->r300->context.screen;
    CS_LOCALS(r300render->r300);
    BEGIN_CS(2);
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max);
    END_CS;

    r300render->vbo_max_used = MAX2(r300render->vbo_max_used,
                                    r300render->vertex_size * (max + 1));
    pipe_buffer_unmap(screen, r300render->vbo);
}

static void r300_render_release_vertices(struct vbuf_render* render)
{
    struct r300_render* r300render = r300_render(render);

    r300render->vbo_offset += r300render->vbo_max_used;
    r300render->vbo_max_used = 0;
}

static boolean r300_render_set_primitive(struct vbuf_render* render,
                                         unsigned prim)
{
    struct r300_render* r300render = r300_render(render);

    r300render->prim = prim;
    r300render->hwprim = r300_translate_primitive(prim);

    return TRUE;
}

static void r300_render_draw_arrays(struct vbuf_render* render,
                                    unsigned start,
                                    unsigned count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;

    CS_LOCALS(r300);

    r300_emit_dirty_state(r300);

    DBG(r300, DBG_DRAW, "r300: Doing vbuf render, count %d\n", count);

    BEGIN_CS(2);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
           r300render->hwprim);
    END_CS;
}

static void r300_render_draw(struct vbuf_render* render,
                             const ushort* indices,
                             uint count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    int i;

    CS_LOCALS(r300);

    r300_emit_dirty_state(r300);

    BEGIN_CS(2 + (count+1)/2);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, (count+1)/2);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
           r300render->hwprim);
    for (i = 0; i < count-1; i += 2) {
        OUT_CS(indices[i+1] << 16 | indices[i]);
    }
    if (count % 2) {
        OUT_CS(indices[count-1]);
    }
    END_CS;
}

static void r300_render_destroy(struct vbuf_render* render)
{
    FREE(render);
}

static struct vbuf_render* r300_render_create(struct r300_context* r300)
{
    struct r300_render* r300render = CALLOC_STRUCT(r300_render);

    r300render->r300 = r300;

    /* XXX find real numbers plz */
    r300render->base.max_vertex_buffer_bytes = 128 * 1024;
    r300render->base.max_indices = 16 * 1024;

    r300render->base.get_vertex_info = r300_render_get_vertex_info;
    r300render->base.allocate_vertices = r300_render_allocate_vertices;
    r300render->base.map_vertices = r300_render_map_vertices;
    r300render->base.unmap_vertices = r300_render_unmap_vertices;
    r300render->base.set_primitive = r300_render_set_primitive;
    r300render->base.draw = r300_render_draw;
    r300render->base.draw_arrays = r300_render_draw_arrays;
    r300render->base.release_vertices = r300_render_release_vertices;
    r300render->base.destroy = r300_render_destroy;

    r300render->vbo = NULL;
    r300render->vbo_size = 0;
    r300render->vbo_offset = 0;

    return &r300render->base;
}

struct draw_stage* r300_draw_stage(struct r300_context* r300)
{
    struct vbuf_render* render;
    struct draw_stage* stage;

    render = r300_render_create(r300);

    if (!render) {
        return NULL;
    }

    stage = draw_vbuf_stage(r300->draw, render);

    if (!stage) {
        render->destroy(render);
        return NULL;
    }

    draw_set_render(r300->draw, render);

    return stage;
}