/* r600_state_common.c — revision 59a402cecd74e0a7a277f5af0b8a2707857846ed */
1/* 2 * Copyright 2010 Red Hat Inc. 3 * 2010 Jerome Glisse 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * on the rights to use, copy, modify, merge, publish, distribute, sub 9 * license, and/or sell copies of the Software, and to permit persons to whom 10 * the Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, 20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 22 * USE OR OTHER DEALINGS IN THE SOFTWARE. 
23 * 24 * Authors: Dave Airlie <airlied@redhat.com> 25 * Jerome Glisse <jglisse@redhat.com> 26 */ 27#include <util/u_memory.h> 28#include <util/u_format.h> 29#include <pipebuffer/pb_buffer.h> 30#include "pipe/p_shader_tokens.h" 31#include "r600_formats.h" 32#include "r600_pipe.h" 33#include "r600d.h" 34 35static void r600_spi_update(struct r600_pipe_context *rctx); 36 37static int r600_conv_pipe_prim(unsigned pprim, unsigned *prim) 38{ 39 static const int prim_conv[] = { 40 V_008958_DI_PT_POINTLIST, 41 V_008958_DI_PT_LINELIST, 42 V_008958_DI_PT_LINELOOP, 43 V_008958_DI_PT_LINESTRIP, 44 V_008958_DI_PT_TRILIST, 45 V_008958_DI_PT_TRISTRIP, 46 V_008958_DI_PT_TRIFAN, 47 V_008958_DI_PT_QUADLIST, 48 V_008958_DI_PT_QUADSTRIP, 49 V_008958_DI_PT_POLYGON, 50 -1, 51 -1, 52 -1, 53 -1 54 }; 55 56 *prim = prim_conv[pprim]; 57 if (*prim == -1) { 58 fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim); 59 return -1; 60 } 61 return 0; 62} 63 64/* common state between evergreen and r600 */ 65void r600_bind_blend_state(struct pipe_context *ctx, void *state) 66{ 67 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 68 struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state; 69 struct r600_pipe_state *rstate; 70 71 if (state == NULL) 72 return; 73 rstate = &blend->rstate; 74 rctx->states[rstate->id] = rstate; 75 rctx->cb_target_mask = blend->cb_target_mask; 76 r600_context_pipe_state_set(&rctx->ctx, rstate); 77} 78 79void r600_bind_dsa_state(struct pipe_context *ctx, void *state) 80{ 81 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 82 struct r600_pipe_dsa *dsa = state; 83 struct r600_pipe_state *rstate; 84 85 if (state == NULL) 86 return; 87 rstate = &dsa->rstate; 88 rctx->states[rstate->id] = rstate; 89 rctx->alpha_ref = dsa->alpha_ref; 90 rctx->alpha_ref_dirty = true; 91 r600_context_pipe_state_set(&rctx->ctx, rstate); 92} 93 94void r600_bind_rs_state(struct pipe_context *ctx, void *state) 95{ 96 struct r600_pipe_rasterizer 
*rs = (struct r600_pipe_rasterizer *)state; 97 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 98 99 if (state == NULL) 100 return; 101 102 rctx->flatshade = rs->flatshade; 103 rctx->sprite_coord_enable = rs->sprite_coord_enable; 104 rctx->rasterizer = rs; 105 106 rctx->states[rs->rstate.id] = &rs->rstate; 107 r600_context_pipe_state_set(&rctx->ctx, &rs->rstate); 108 109 if (rctx->family >= CHIP_CEDAR) { 110 evergreen_polygon_offset_update(rctx); 111 } else { 112 r600_polygon_offset_update(rctx); 113 } 114 if (rctx->ps_shader && rctx->vs_shader) 115 r600_spi_update(rctx); 116} 117 118void r600_delete_rs_state(struct pipe_context *ctx, void *state) 119{ 120 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 121 struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state; 122 123 if (rctx->rasterizer == rs) { 124 rctx->rasterizer = NULL; 125 } 126 if (rctx->states[rs->rstate.id] == &rs->rstate) { 127 rctx->states[rs->rstate.id] = NULL; 128 } 129 free(rs); 130} 131 132void r600_sampler_view_destroy(struct pipe_context *ctx, 133 struct pipe_sampler_view *state) 134{ 135 struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state; 136 137 pipe_resource_reference(&state->texture, NULL); 138 FREE(resource); 139} 140 141void r600_delete_state(struct pipe_context *ctx, void *state) 142{ 143 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 144 struct r600_pipe_state *rstate = (struct r600_pipe_state *)state; 145 146 if (rctx->states[rstate->id] == rstate) { 147 rctx->states[rstate->id] = NULL; 148 } 149 for (int i = 0; i < rstate->nregs; i++) { 150 r600_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL); 151 } 152 free(rstate); 153} 154 155void r600_bind_vertex_elements(struct pipe_context *ctx, void *state) 156{ 157 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 158 struct r600_vertex_element *v = (struct r600_vertex_element*)state; 159 160 rctx->vertex_elements = v; 161 
if (v) { 162 u_vbuf_mgr_bind_vertex_elements(rctx->vbuf_mgr, state, 163 v->vmgr_elements); 164 165 rctx->states[v->rstate.id] = &v->rstate; 166 r600_context_pipe_state_set(&rctx->ctx, &v->rstate); 167 } 168} 169 170void r600_delete_vertex_element(struct pipe_context *ctx, void *state) 171{ 172 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 173 struct r600_vertex_element *v = (struct r600_vertex_element*)state; 174 175 if (rctx->states[v->rstate.id] == &v->rstate) { 176 rctx->states[v->rstate.id] = NULL; 177 } 178 if (rctx->vertex_elements == state) 179 rctx->vertex_elements = NULL; 180 181 r600_bo_reference(rctx->radeon, &v->fetch_shader, NULL); 182 u_vbuf_mgr_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements); 183 FREE(state); 184} 185 186 187void r600_set_index_buffer(struct pipe_context *ctx, 188 const struct pipe_index_buffer *ib) 189{ 190 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 191 192 if (ib) { 193 pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer); 194 memcpy(&rctx->index_buffer, ib, sizeof(rctx->index_buffer)); 195 } else { 196 pipe_resource_reference(&rctx->index_buffer.buffer, NULL); 197 memset(&rctx->index_buffer, 0, sizeof(rctx->index_buffer)); 198 } 199 200 /* TODO make this more like a state */ 201} 202 203void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count, 204 const struct pipe_vertex_buffer *buffers) 205{ 206 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 207 int i; 208 209 /* Zero states. 
*/ 210 for (i = 0; i < count; i++) { 211 if (!buffers[i].buffer) { 212 if (rctx->family >= CHIP_CEDAR) { 213 evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i); 214 } else { 215 r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i); 216 } 217 } 218 } 219 for (; i < rctx->vbuf_mgr->nr_real_vertex_buffers; i++) { 220 if (rctx->family >= CHIP_CEDAR) { 221 evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i); 222 } else { 223 r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i); 224 } 225 } 226 227 u_vbuf_mgr_set_vertex_buffers(rctx->vbuf_mgr, count, buffers); 228} 229 230void *r600_create_vertex_elements(struct pipe_context *ctx, 231 unsigned count, 232 const struct pipe_vertex_element *elements) 233{ 234 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 235 struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element); 236 237 assert(count < 32); 238 if (!v) 239 return NULL; 240 241 v->count = count; 242 v->vmgr_elements = 243 u_vbuf_mgr_create_vertex_elements(rctx->vbuf_mgr, count, 244 elements, v->elements); 245 246 if (r600_vertex_elements_build_fetch_shader(rctx, v)) { 247 FREE(v); 248 return NULL; 249 } 250 251 return v; 252} 253 254void *r600_create_shader_state(struct pipe_context *ctx, 255 const struct pipe_shader_state *state) 256{ 257 struct r600_pipe_shader *shader = CALLOC_STRUCT(r600_pipe_shader); 258 int r; 259 260 r = r600_pipe_shader_create(ctx, shader, state->tokens); 261 if (r) { 262 return NULL; 263 } 264 return shader; 265} 266 267void r600_bind_ps_shader(struct pipe_context *ctx, void *state) 268{ 269 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 270 271 /* TODO delete old shader */ 272 rctx->ps_shader = (struct r600_pipe_shader *)state; 273 if (state) { 274 r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate); 275 } 276 if (rctx->ps_shader && rctx->vs_shader) { 277 r600_spi_update(rctx); 278 r600_adjust_gprs(rctx); 279 } 280} 281 282void 
r600_bind_vs_shader(struct pipe_context *ctx, void *state) 283{ 284 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 285 286 /* TODO delete old shader */ 287 rctx->vs_shader = (struct r600_pipe_shader *)state; 288 if (state) { 289 r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_shader->rstate); 290 } 291 if (rctx->ps_shader && rctx->vs_shader) { 292 r600_spi_update(rctx); 293 r600_adjust_gprs(rctx); 294 } 295} 296 297void r600_delete_ps_shader(struct pipe_context *ctx, void *state) 298{ 299 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 300 struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state; 301 302 if (rctx->ps_shader == shader) { 303 rctx->ps_shader = NULL; 304 } 305 306 r600_pipe_shader_destroy(ctx, shader); 307 free(shader); 308} 309 310void r600_delete_vs_shader(struct pipe_context *ctx, void *state) 311{ 312 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 313 struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state; 314 315 if (rctx->vs_shader == shader) { 316 rctx->vs_shader = NULL; 317 } 318 319 r600_pipe_shader_destroy(ctx, shader); 320 free(shader); 321} 322 323static void r600_update_alpha_ref(struct r600_pipe_context *rctx) 324{ 325 unsigned alpha_ref; 326 struct r600_pipe_state rstate; 327 328 alpha_ref = rctx->alpha_ref; 329 rstate.nregs = 0; 330 if (rctx->export_16bpc) 331 alpha_ref &= ~0x1FFF; 332 r600_pipe_state_add_reg(&rstate, R_028438_SX_ALPHA_REF, alpha_ref, 0xFFFFFFFF, NULL); 333 334 r600_context_pipe_state_set(&rctx->ctx, &rstate); 335 rctx->alpha_ref_dirty = false; 336} 337 338/* FIXME optimize away spi update when it's not needed */ 339static void r600_spi_block_init(struct r600_pipe_context *rctx, struct r600_pipe_state *rstate) 340{ 341 int i; 342 rstate->nregs = 0; 343 rstate->id = R600_PIPE_STATE_SPI; 344 for (i = 0; i < 32; i++) { 345 r600_pipe_state_add_reg(rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, 0, 0xFFFFFFFF, NULL); 346 } 347} 348 349static void 
r600_spi_update(struct r600_pipe_context *rctx) 350{ 351 struct r600_pipe_shader *shader = rctx->ps_shader; 352 struct r600_pipe_state *rstate = &rctx->spi; 353 struct r600_shader *rshader = &shader->shader; 354 unsigned i, tmp, sid; 355 356 if (rctx->spi.id == 0) 357 r600_spi_block_init(rctx, &rctx->spi); 358 359 rstate->nregs = 0; 360 for (i = 0; i < rshader->ninput; i++) { 361 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION || 362 rshader->input[i].name == TGSI_SEMANTIC_FACE) 363 if (rctx->family >= CHIP_CEDAR) 364 continue; 365 else 366 sid=0; 367 else 368 sid=r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i); 369 370 tmp = S_028644_SEMANTIC(sid); 371 372 if (rshader->input[i].name == TGSI_SEMANTIC_COLOR || 373 rshader->input[i].name == TGSI_SEMANTIC_BCOLOR || 374 rshader->input[i].name == TGSI_SEMANTIC_POSITION) { 375 tmp |= S_028644_FLAT_SHADE(rctx->flatshade); 376 } 377 378 if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC && 379 rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) { 380 tmp |= S_028644_PT_SPRITE_TEX(1); 381 } 382 383 if (rctx->family < CHIP_CEDAR) { 384 if (rshader->input[i].centroid) 385 tmp |= S_028644_SEL_CENTROID(1); 386 387 if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR) 388 tmp |= S_028644_SEL_LINEAR(1); 389 } 390 391 r600_pipe_state_mod_reg(rstate, tmp); 392 } 393 394 r600_context_pipe_state_set(&rctx->ctx, rstate); 395} 396 397void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index, 398 struct pipe_resource *buffer) 399{ 400 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 401 struct r600_resource_buffer *rbuffer = r600_buffer(buffer); 402 struct r600_pipe_resource_state *rstate; 403 uint32_t offset; 404 405 /* Note that the state tracker can unbind constant buffers by 406 * passing NULL here. 
407 */ 408 if (buffer == NULL) { 409 return; 410 } 411 412 r600_upload_const_buffer(rctx, &rbuffer, &offset); 413 offset += r600_bo_offset(rbuffer->r.bo); 414 415 switch (shader) { 416 case PIPE_SHADER_VERTEX: 417 rctx->vs_const_buffer.nregs = 0; 418 r600_pipe_state_add_reg(&rctx->vs_const_buffer, 419 R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 420 ALIGN_DIVUP(buffer->width0 >> 4, 16), 421 0xFFFFFFFF, NULL); 422 r600_pipe_state_add_reg(&rctx->vs_const_buffer, 423 R_028980_ALU_CONST_CACHE_VS_0, 424 offset >> 8, 0xFFFFFFFF, rbuffer->r.bo); 425 r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer); 426 427 rstate = &rctx->vs_const_buffer_resource[index]; 428 if (!rstate->id) { 429 if (rctx->family >= CHIP_CEDAR) { 430 evergreen_pipe_init_buffer_resource(rctx, rstate); 431 } else { 432 r600_pipe_init_buffer_resource(rctx, rstate); 433 } 434 } 435 436 if (rctx->family >= CHIP_CEDAR) { 437 evergreen_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16); 438 evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index); 439 } else { 440 r600_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16); 441 r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index); 442 } 443 break; 444 case PIPE_SHADER_FRAGMENT: 445 rctx->ps_const_buffer.nregs = 0; 446 r600_pipe_state_add_reg(&rctx->ps_const_buffer, 447 R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 448 ALIGN_DIVUP(buffer->width0 >> 4, 16), 449 0xFFFFFFFF, NULL); 450 r600_pipe_state_add_reg(&rctx->ps_const_buffer, 451 R_028940_ALU_CONST_CACHE_PS_0, 452 offset >> 8, 0xFFFFFFFF, rbuffer->r.bo); 453 r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer); 454 455 rstate = &rctx->ps_const_buffer_resource[index]; 456 if (!rstate->id) { 457 if (rctx->family >= CHIP_CEDAR) { 458 evergreen_pipe_init_buffer_resource(rctx, rstate); 459 } else { 460 r600_pipe_init_buffer_resource(rctx, rstate); 461 } 462 } 463 if (rctx->family >= CHIP_CEDAR) { 464 evergreen_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 
16); 465 evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index); 466 } else { 467 r600_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16); 468 r600_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index); 469 } 470 break; 471 default: 472 R600_ERR("unsupported %d\n", shader); 473 return; 474 } 475 476 if (buffer != &rbuffer->r.b.b.b) 477 pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL); 478} 479 480static void r600_vertex_buffer_update(struct r600_pipe_context *rctx) 481{ 482 struct r600_pipe_resource_state *rstate; 483 struct r600_resource *rbuffer; 484 struct pipe_vertex_buffer *vertex_buffer; 485 unsigned i, count, offset; 486 487 if (rctx->vertex_elements->vbuffer_need_offset) { 488 /* one resource per vertex elements */ 489 count = rctx->vertex_elements->count; 490 } else { 491 /* bind vertex buffer once */ 492 count = rctx->vbuf_mgr->nr_real_vertex_buffers; 493 } 494 495 for (i = 0 ; i < count; i++) { 496 rstate = &rctx->fs_resource[i]; 497 498 if (rctx->vertex_elements->vbuffer_need_offset) { 499 /* one resource per vertex elements */ 500 unsigned vbuffer_index; 501 vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index; 502 vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[vbuffer_index]; 503 rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index]; 504 offset = rctx->vertex_elements->vbuffer_offset[i]; 505 } else { 506 /* bind vertex buffer once */ 507 vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[i]; 508 rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[i]; 509 offset = 0; 510 } 511 if (vertex_buffer == NULL || rbuffer == NULL) 512 continue; 513 offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo); 514 515 if (!rstate->id) { 516 if (rctx->family >= CHIP_CEDAR) { 517 evergreen_pipe_init_buffer_resource(rctx, rstate); 518 } else { 519 r600_pipe_init_buffer_resource(rctx, rstate); 520 } 521 } 522 523 if (rctx->family >= CHIP_CEDAR) { 524 
evergreen_pipe_mod_buffer_resource(rstate, rbuffer, offset, vertex_buffer->stride); 525 evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i); 526 } else { 527 r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, vertex_buffer->stride); 528 r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i); 529 } 530 } 531} 532 533void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info) 534{ 535 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; 536 struct r600_resource *rbuffer; 537 struct r600_draw rdraw; 538 struct r600_drawl draw; 539 unsigned prim, mask; 540 541 if (!rctx->blit) { 542 if (rctx->have_depth_fb || rctx->have_depth_texture) 543 r600_flush_depth_textures(rctx); 544 } 545 u_vbuf_mgr_draw_begin(rctx->vbuf_mgr, info); 546 r600_vertex_buffer_update(rctx); 547 548 draw.info = *info; 549 draw.ctx = ctx; 550 draw.index_buffer = NULL; 551 if (info->indexed && rctx->index_buffer.buffer) { 552 draw.info.start += rctx->index_buffer.offset / rctx->index_buffer.index_size; 553 pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer); 554 555 r600_translate_index_buffer(rctx, &draw.index_buffer, 556 &rctx->index_buffer.index_size, 557 &draw.info.start, 558 info->count); 559 560 draw.index_size = rctx->index_buffer.index_size; 561 draw.index_buffer_offset = draw.info.start * draw.index_size; 562 draw.info.start = 0; 563 564 if (u_vbuf_resource(draw.index_buffer)->user_ptr) { 565 r600_upload_index_buffer(rctx, &draw); 566 } 567 } else { 568 draw.index_size = 0; 569 draw.index_buffer_offset = 0; 570 draw.info.index_bias = info->start; 571 } 572 573 if (r600_conv_pipe_prim(draw.info.mode, &prim)) 574 return; 575 576 if (rctx->alpha_ref_dirty) 577 r600_update_alpha_ref(rctx); 578 579 mask = 0; 580 for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) { 581 mask |= (0xF << (i * 4)); 582 } 583 584 if (rctx->vgt.id != R600_PIPE_STATE_VGT) { 585 rctx->vgt.id = R600_PIPE_STATE_VGT; 586 rctx->vgt.nregs = 0; 587 
r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL); 588 r600_pipe_state_add_reg(&rctx->vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL); 589 r600_pipe_state_add_reg(&rctx->vgt, R_028400_VGT_MAX_VTX_INDX, draw.info.max_index, 0xFFFFFFFF, NULL); 590 r600_pipe_state_add_reg(&rctx->vgt, R_028404_VGT_MIN_VTX_INDX, draw.info.min_index, 0xFFFFFFFF, NULL); 591 r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, draw.info.index_bias, 0xFFFFFFFF, NULL); 592 r600_pipe_state_add_reg(&rctx->vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL); 593 r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, draw.info.start_instance, 0xFFFFFFFF, NULL); 594 r600_pipe_state_add_reg(&rctx->vgt, R_028814_PA_SU_SC_MODE_CNTL, 595 0, 596 S_028814_PROVOKING_VTX_LAST(1), NULL); 597 598 } 599 600 rctx->vgt.nregs = 0; 601 r600_pipe_state_mod_reg(&rctx->vgt, prim); 602 r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_target_mask & mask); 603 r600_pipe_state_mod_reg(&rctx->vgt, draw.info.max_index); 604 r600_pipe_state_mod_reg(&rctx->vgt, draw.info.min_index); 605 r600_pipe_state_mod_reg(&rctx->vgt, draw.info.index_bias); 606 r600_pipe_state_mod_reg(&rctx->vgt, 0); 607 r600_pipe_state_mod_reg(&rctx->vgt, draw.info.start_instance); 608 if (draw.info.mode == PIPE_PRIM_QUADS || draw.info.mode == PIPE_PRIM_QUAD_STRIP || draw.info.mode == PIPE_PRIM_POLYGON) { 609 r600_pipe_state_mod_reg(&rctx->vgt, S_028814_PROVOKING_VTX_LAST(1)); 610 } 611 612 r600_context_pipe_state_set(&rctx->ctx, &rctx->vgt); 613 614 rdraw.vgt_num_indices = draw.info.count; 615 rdraw.vgt_num_instances = draw.info.instance_count; 616 rdraw.vgt_index_type = ((draw.index_size == 4) ? 1 : 0); 617 if (R600_BIG_ENDIAN) 618 rdraw.vgt_index_type |= (draw.index_size >> 1) << 2; 619 rdraw.vgt_draw_initiator = draw.index_size ? 
0 : 2; 620 rdraw.indices = NULL; 621 if (draw.index_buffer) { 622 rbuffer = (struct r600_resource*)draw.index_buffer; 623 rdraw.indices = rbuffer->bo; 624 rdraw.indices_bo_offset = draw.index_buffer_offset; 625 } 626 627 if (rctx->family >= CHIP_CEDAR) { 628 evergreen_context_draw(&rctx->ctx, &rdraw); 629 } else { 630 r600_context_draw(&rctx->ctx, &rdraw); 631 } 632 633 if (rctx->framebuffer.zsbuf) 634 { 635 struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture; 636 ((struct r600_resource_texture *)tex)->dirty_db = TRUE; 637 } 638 639 pipe_resource_reference(&draw.index_buffer, NULL); 640 641 u_vbuf_mgr_draw_end(rctx->vbuf_mgr); 642} 643 644void _r600_pipe_state_add_reg(struct r600_context *ctx, 645 struct r600_pipe_state *state, 646 u32 offset, u32 value, u32 mask, 647 u32 range_id, u32 block_id, 648 struct r600_bo *bo) 649{ 650 struct r600_range *range; 651 struct r600_block *block; 652 653 range = &ctx->range[range_id]; 654 block = range->blocks[block_id]; 655 state->regs[state->nregs].block = block; 656 state->regs[state->nregs].id = (offset - block->start_offset) >> 2; 657 658 state->regs[state->nregs].value = value; 659 state->regs[state->nregs].mask = mask; 660 state->regs[state->nregs].bo = bo; 661 662 state->nregs++; 663 assert(state->nregs < R600_BLOCK_MAX_REG); 664} 665 666void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state, 667 u32 offset, u32 value, u32 mask, 668 struct r600_bo *bo) 669{ 670 state->regs[state->nregs].id = offset; 671 state->regs[state->nregs].block = NULL; 672 state->regs[state->nregs].value = value; 673 state->regs[state->nregs].mask = mask; 674 state->regs[state->nregs].bo = bo; 675 676 state->nregs++; 677 assert(state->nregs < R600_BLOCK_MAX_REG); 678} 679