r600_hw_context.c revision 42bc0b9b9dc31a15c08d409d14d25ccf19501255
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include "r600_hw_context_priv.h"
#include "r600d.h"
#include "util/u_memory.h"
#include <errno.h>

/* Get backends mask */
void r600_get_backend_mask(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;
	uint64_t va;

	/* if the backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise fall back to the path for older kernels */

	/* create a buffer for the event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	va = r600_resource_va(&ctx->screen->screen, (void*)buffer);

	/* initialize the buffer with zeroes */
	results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, buffer, RADEON_USAGE_WRITE);

		/* analyze the results */
		results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
			ctx->ws->buffer_unmap(buffer->buf);
		}
	}

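	/* done with the scratch buffer; any per-DB result has been folded into 'mask' */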
	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fall back to the old method - set the num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

void r600_context_ps_partial_flush(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	if (!(ctx->flags & R600_CONTEXT_DRAW_PENDING))
		return;

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);

	ctx->flags &= ~R600_CONTEXT_DRAW_PENDING;
}

static void r600_init_block(struct r600_context *ctx,
			    struct r600_block *block,
			    const struct r600_reg *reg, int index, int nreg,
			    unsigned opcode, unsigned offset_base)
{
	int i = index;
	int j, n = nreg;

	/* initialize the block */
	if (opcode == PKT3_SET_RESOURCE) {
		block->flags = BLOCK_FLAG_RESOURCE;
		block->status |= R600_BLOCK_STATUS_RESOURCE_DIRTY; /* dirty all blocks at start */
	} else {
		block->flags = 0;
		block->status |= R600_BLOCK_STATUS_DIRTY; /* dirty all blocks at start */
	}
	block->start_offset = reg[i].offset;
	block->pm4[block->pm4_ndwords++] = PKT3(opcode, n, 0);
	block->pm4[block->pm4_ndwords++] = (block->start_offset - offset_base) >> 2;
	block->reg = &block->pm4[block->pm4_ndwords];
	block->pm4_ndwords += n;
	block->nreg = n;
	block->nreg_dirty = n;
	LIST_INITHEAD(&block->list);
	LIST_INITHEAD(&block->enable_list);

	for (j = 0; j < n; j++) {
		if (reg[i+j].flags & REG_FLAG_DIRTY_ALWAYS) {
			block->flags |= REG_FLAG_DIRTY_ALWAYS;
		}
		if (reg[i+j].flags & REG_FLAG_ENABLE_ALWAYS) {
			if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
				block->status |= R600_BLOCK_STATUS_ENABLED;
				LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
				LIST_ADDTAIL(&block->list, &ctx->dirty);
			}
		}
		if (reg[i+j].flags & REG_FLAG_FLUSH_CHANGE) {
			block->flags |= REG_FLAG_FLUSH_CHANGE;
		}

		if (reg[i+j].flags & REG_FLAG_NEED_BO) {
			block->nbo++;
			assert(block->nbo < R600_BLOCK_MAX_BO);
			block->pm4_bo_index[j] = block->nbo;
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
			block->pm4[block->pm4_ndwords++] = 0x00000000;
			block->reloc[block->nbo].bo_pm4_index = block->pm4_ndwords - 1;
		}
		if ((ctx->family > CHIP_R600) &&
		    (ctx->family < CHIP_RV770) && reg[i+j].flags & REG_FLAG_RV6XX_SBU) {
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
			block->pm4[block->pm4_ndwords++] = reg[i+j].sbu_flags;
		}
	}
	/* check that we stay within the limit */
	assert(block->pm4_ndwords < R600_BLOCK_MAX_REG);
}

int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
			   unsigned opcode, unsigned offset_base)
{
	struct r600_block *block;
	struct r600_range *range;
	int offset;

	for (unsigned i = 0, n = 0; i < nreg; i += n) {
		/* ignore new block marker */
		if (reg[i].offset == GROUP_FORCE_NEW_BLOCK) {
			n = 1;
			continue;
		}

		/* on R600, ignore registers that don't exist on R600 */
		if ((reg[i].flags & REG_FLAG_NOT_R600) && ctx->family == CHIP_R600) {
			n = 1;
			continue;
		}

		/* registers that need relocation are in their own group */
		/* find the number of consecutive registers */
		n = 0;
		offset = reg[i].offset;
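		/* walk forward while offsets stay contiguous (4 bytes apart);
		 * each run becomes one PM4 block, capped at R600_BLOCK_MAX_REG - 2
		 * registers to leave room for the two packet header dwords
		 */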
		while (reg[i + n].offset == offset) {
			n++;
			offset += 4;
			if ((n + i) >= nreg)
				break;
			if (n >= (R600_BLOCK_MAX_REG - 2))
				break;
		}

		/* allocate a new block */
		block = calloc(1, sizeof(struct r600_block));
		if (block == NULL) {
			return -ENOMEM;
		}
		ctx->nblocks++;
		for (int j = 0; j < n; j++) {
			range = &ctx->range[CTX_RANGE_ID(reg[i + j].offset)];
			/* create the block table if it doesn't exist */
			if (!range->blocks)
				range->blocks = calloc(1 << HASH_SHIFT, sizeof(void *));
			if (!range->blocks)
				return -1;

			range->blocks[CTX_BLOCK_ID(reg[i + j].offset)] = block;
		}

		r600_init_block(ctx, block, reg, i, n, opcode, offset_base);

	}
	return 0;
}

/* R600/R700 configuration */
static const struct r600_reg r600_config_reg_list[] = {
	{R_008958_VGT_PRIMITIVE_TYPE, 0, 0},
	{R_008C04_SQ_GPR_RESOURCE_MGMT_1, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
	{R_009508_TA_CNTL_AUX, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
};

static const struct r600_reg r600_ctl_const_list[] = {
	{R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0},
};

static const struct r600_reg r600_context_reg_list[] = {
	{R_028A4C_PA_SC_MODE_CNTL, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028040_CB_COLOR0_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(0)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280A0_CB_COLOR0_INFO, REG_FLAG_NEED_BO, 0},
	{R_028060_CB_COLOR0_SIZE, 0, 0},
	{R_028080_CB_COLOR0_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280E0_CB_COLOR0_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280C0_CB_COLOR0_TILE, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028044_CB_COLOR1_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(1)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280A4_CB_COLOR1_INFO, REG_FLAG_NEED_BO, 0},
	{R_028064_CB_COLOR1_SIZE, 0, 0},
	{R_028084_CB_COLOR1_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280E4_CB_COLOR1_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280C4_CB_COLOR1_TILE, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028048_CB_COLOR2_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(2)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280A8_CB_COLOR2_INFO, REG_FLAG_NEED_BO, 0},
	{R_028068_CB_COLOR2_SIZE, 0, 0},
	{R_028088_CB_COLOR2_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280E8_CB_COLOR2_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280C8_CB_COLOR2_TILE, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_02804C_CB_COLOR3_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(3)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280AC_CB_COLOR3_INFO, REG_FLAG_NEED_BO, 0},
	{R_02806C_CB_COLOR3_SIZE, 0, 0},
	{R_02808C_CB_COLOR3_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280EC_CB_COLOR3_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280CC_CB_COLOR3_TILE, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028050_CB_COLOR4_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(4)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280B0_CB_COLOR4_INFO, REG_FLAG_NEED_BO, 0},
	{R_028070_CB_COLOR4_SIZE, 0, 0},
	{R_028090_CB_COLOR4_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280F0_CB_COLOR4_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280D0_CB_COLOR4_TILE, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028054_CB_COLOR5_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(5)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280B4_CB_COLOR5_INFO, REG_FLAG_NEED_BO, 0},
	{R_028074_CB_COLOR5_SIZE, 0, 0},
	{R_028094_CB_COLOR5_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280F4_CB_COLOR5_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280D4_CB_COLOR5_TILE, REG_FLAG_NEED_BO, 0},
	{R_028058_CB_COLOR6_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(6)},
	{R_0280B8_CB_COLOR6_INFO, REG_FLAG_NEED_BO, 0},
	{R_028078_CB_COLOR6_SIZE, 0, 0},
	{R_028098_CB_COLOR6_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280F8_CB_COLOR6_FRAG, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280D8_CB_COLOR6_TILE, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_02805C_CB_COLOR7_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(7)},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0280BC_CB_COLOR7_INFO, REG_FLAG_NEED_BO, 0},
	{R_02807C_CB_COLOR7_SIZE, 0, 0},
	{R_02809C_CB_COLOR7_VIEW, 0, 0},
	{R_0280FC_CB_COLOR7_FRAG, REG_FLAG_NEED_BO, 0},
	{R_0280DC_CB_COLOR7_TILE, REG_FLAG_NEED_BO, 0},
	{R_028120_CB_CLEAR_RED, 0, 0},
	{R_028124_CB_CLEAR_GREEN, 0, 0},
	{R_028128_CB_CLEAR_BLUE, 0, 0},
	{R_02812C_CB_CLEAR_ALPHA, 0, 0},
	{R_028140_ALU_CONST_BUFFER_SIZE_PS_0, REG_FLAG_DIRTY_ALWAYS, 0},
	{R_028144_ALU_CONST_BUFFER_SIZE_PS_1, REG_FLAG_DIRTY_ALWAYS, 0},
	{R_028180_ALU_CONST_BUFFER_SIZE_VS_0, REG_FLAG_DIRTY_ALWAYS, 0},
	{R_028184_ALU_CONST_BUFFER_SIZE_VS_1, REG_FLAG_DIRTY_ALWAYS, 0},
	{R_028940_ALU_CONST_CACHE_PS_0, REG_FLAG_NEED_BO, 0},
	{R_028944_ALU_CONST_CACHE_PS_1, REG_FLAG_NEED_BO, 0},
	{R_028980_ALU_CONST_CACHE_VS_0, REG_FLAG_NEED_BO, 0},
	{R_028984_ALU_CONST_CACHE_VS_1, REG_FLAG_NEED_BO, 0},
	{R_02823C_CB_SHADER_MASK, 0, 0},
	{R_028238_CB_TARGET_MASK, 0, 0},
	{R_028410_SX_ALPHA_TEST_CONTROL, 0, 0},
	{R_028414_CB_BLEND_RED, 0, 0},
	{R_028418_CB_BLEND_GREEN, 0, 0},
	{R_02841C_CB_BLEND_BLUE, 0, 0},
	{R_028420_CB_BLEND_ALPHA, 0, 0},
	{R_028424_CB_FOG_RED, 0, 0},
	{R_028428_CB_FOG_GREEN, 0, 0},
	{R_02842C_CB_FOG_BLUE, 0, 0},
	{R_028430_DB_STENCILREFMASK, 0, 0},
	{R_028434_DB_STENCILREFMASK_BF, 0, 0},
	{R_028438_SX_ALPHA_REF, 0, 0},
	{R_028780_CB_BLEND0_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028784_CB_BLEND1_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028788_CB_BLEND2_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_02878C_CB_BLEND3_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028790_CB_BLEND4_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028794_CB_BLEND5_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_028798_CB_BLEND6_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_02879C_CB_BLEND7_CONTROL, REG_FLAG_NOT_R600, 0},
	{R_0287A0_CB_SHADER_CONTROL, 0, 0},
	{R_028800_DB_DEPTH_CONTROL, 0, 0},
	{R_028804_CB_BLEND_CONTROL, 0, 0},
	{R_028808_CB_COLOR_CONTROL, 0, 0},
	{R_02880C_DB_SHADER_CONTROL, 0, 0},
	{R_02800C_DB_DEPTH_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_DEPTH},
	{R_028000_DB_DEPTH_SIZE, 0, 0},
	{R_028004_DB_DEPTH_VIEW, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028010_DB_DEPTH_INFO, REG_FLAG_NEED_BO, 0},
	{R_028A6C_VGT_GS_OUT_PRIM_TYPE, 0, 0},
	{R_028D24_DB_HTILE_SURFACE, 0, 0},
	{R_028D34_DB_PREFETCH_LIMIT, 0, 0},
	{R_028204_PA_SC_WINDOW_SCISSOR_TL, 0, 0},
	{R_028208_PA_SC_WINDOW_SCISSOR_BR, 0, 0},
	{R_028250_PA_SC_VPORT_SCISSOR_0_TL, 0, 0},
	{R_028254_PA_SC_VPORT_SCISSOR_0_BR, 0, 0},
	{R_02843C_PA_CL_VPORT_XSCALE_0, 0, 0},
	{R_028440_PA_CL_VPORT_XOFFSET_0, 0, 0},
	{R_028444_PA_CL_VPORT_YSCALE_0, 0, 0},
	{R_028448_PA_CL_VPORT_YOFFSET_0, 0, 0},
	{R_02844C_PA_CL_VPORT_ZSCALE_0, 0, 0},
	{R_028450_PA_CL_VPORT_ZOFFSET_0, 0, 0},
	{R_0286D4_SPI_INTERP_CONTROL_0, 0, 0},
	{R_028810_PA_CL_CLIP_CNTL, 0, 0},
	{R_028814_PA_SU_SC_MODE_CNTL, 0, 0},
	{R_02881C_PA_CL_VS_OUT_CNTL, 0, 0},
	{R_028A00_PA_SU_POINT_SIZE, 0, 0},
	{R_028A04_PA_SU_POINT_MINMAX, 0, 0},
	{R_028A08_PA_SU_LINE_CNTL, 0, 0},
	{R_028A0C_PA_SC_LINE_STIPPLE, 0, 0},
	{R_028C08_PA_SU_VTX_CNTL, 0, 0},
	{R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL, 0, 0},
	{R_028DFC_PA_SU_POLY_OFFSET_CLAMP, 0, 0},
	{R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 0, 0},
	{R_028E04_PA_SU_POLY_OFFSET_FRONT_OFFSET, 0, 0},
	{R_028E08_PA_SU_POLY_OFFSET_BACK_SCALE, 0, 0},
	{R_028E0C_PA_SU_POLY_OFFSET_BACK_OFFSET, 0, 0},
	{R_028E20_PA_CL_UCP0_X, 0, 0},
	{R_028E24_PA_CL_UCP0_Y, 0, 0},
	{R_028E28_PA_CL_UCP0_Z, 0, 0},
	{R_028E2C_PA_CL_UCP0_W, 0, 0},
	{R_028E30_PA_CL_UCP1_X, 0, 0},
	{R_028E34_PA_CL_UCP1_Y, 0, 0},
	{R_028E38_PA_CL_UCP1_Z, 0, 0},
	{R_028E3C_PA_CL_UCP1_W, 0, 0},
	{R_028E40_PA_CL_UCP2_X, 0, 0},
	{R_028E44_PA_CL_UCP2_Y, 0, 0},
	{R_028E48_PA_CL_UCP2_Z, 0, 0},
	{R_028E4C_PA_CL_UCP2_W, 0, 0},
	{R_028E50_PA_CL_UCP3_X, 0, 0},
	{R_028E54_PA_CL_UCP3_Y, 0, 0},
	{R_028E58_PA_CL_UCP3_Z, 0, 0},
	{R_028E5C_PA_CL_UCP3_W, 0, 0},
	{R_028E60_PA_CL_UCP4_X, 0, 0},
	{R_028E64_PA_CL_UCP4_Y, 0, 0},
	{R_028E68_PA_CL_UCP4_Z, 0, 0},
	{R_028E6C_PA_CL_UCP4_W, 0, 0},
	{R_028E70_PA_CL_UCP5_X, 0, 0},
	{R_028E74_PA_CL_UCP5_Y, 0, 0},
	{R_028E78_PA_CL_UCP5_Z, 0, 0},
	{R_028E7C_PA_CL_UCP5_W, 0, 0},
	{R_028350_SX_MISC, 0, 0},
	{R_028380_SQ_VTX_SEMANTIC_0, 0, 0},
	{R_028384_SQ_VTX_SEMANTIC_1, 0, 0},
	{R_028388_SQ_VTX_SEMANTIC_2, 0, 0},
	{R_02838C_SQ_VTX_SEMANTIC_3, 0, 0},
	{R_028390_SQ_VTX_SEMANTIC_4, 0, 0},
	{R_028394_SQ_VTX_SEMANTIC_5, 0, 0},
	{R_028398_SQ_VTX_SEMANTIC_6, 0, 0},
	{R_02839C_SQ_VTX_SEMANTIC_7, 0, 0},
	{R_0283A0_SQ_VTX_SEMANTIC_8, 0, 0},
	{R_0283A4_SQ_VTX_SEMANTIC_9, 0, 0},
	{R_0283A8_SQ_VTX_SEMANTIC_10, 0, 0},
	{R_0283AC_SQ_VTX_SEMANTIC_11, 0, 0},
	{R_0283B0_SQ_VTX_SEMANTIC_12, 0, 0},
	{R_0283B4_SQ_VTX_SEMANTIC_13, 0, 0},
	{R_0283B8_SQ_VTX_SEMANTIC_14, 0, 0},
	{R_0283BC_SQ_VTX_SEMANTIC_15, 0, 0},
	{R_0283C0_SQ_VTX_SEMANTIC_16, 0, 0},
	{R_0283C4_SQ_VTX_SEMANTIC_17, 0, 0},
	{R_0283C8_SQ_VTX_SEMANTIC_18, 0, 0},
	{R_0283CC_SQ_VTX_SEMANTIC_19, 0, 0},
	{R_0283D0_SQ_VTX_SEMANTIC_20, 0, 0},
	{R_0283D4_SQ_VTX_SEMANTIC_21, 0, 0},
	{R_0283D8_SQ_VTX_SEMANTIC_22, 0, 0},
	{R_0283DC_SQ_VTX_SEMANTIC_23, 0, 0},
	{R_0283E0_SQ_VTX_SEMANTIC_24, 0, 0},
	{R_0283E4_SQ_VTX_SEMANTIC_25, 0, 0},
	{R_0283E8_SQ_VTX_SEMANTIC_26, 0, 0},
	{R_0283EC_SQ_VTX_SEMANTIC_27, 0, 0},
	{R_0283F0_SQ_VTX_SEMANTIC_28, 0, 0},
	{R_0283F4_SQ_VTX_SEMANTIC_29, 0, 0},
	{R_0283F8_SQ_VTX_SEMANTIC_30, 0, 0},
	{R_0283FC_SQ_VTX_SEMANTIC_31, 0, 0},
	{R_028614_SPI_VS_OUT_ID_0, 0, 0},
	{R_028618_SPI_VS_OUT_ID_1, 0, 0},
	{R_02861C_SPI_VS_OUT_ID_2, 0, 0},
	{R_028620_SPI_VS_OUT_ID_3, 0, 0},
	{R_028624_SPI_VS_OUT_ID_4, 0, 0},
	{R_028628_SPI_VS_OUT_ID_5, 0, 0},
	{R_02862C_SPI_VS_OUT_ID_6, 0, 0},
	{R_028630_SPI_VS_OUT_ID_7, 0, 0},
	{R_028634_SPI_VS_OUT_ID_8, 0, 0},
	{R_028638_SPI_VS_OUT_ID_9, 0, 0},
	{R_0286C4_SPI_VS_OUT_CONFIG, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028858_SQ_PGM_START_VS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028868_SQ_PGM_RESOURCES_VS, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028894_SQ_PGM_START_FS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0288A4_SQ_PGM_RESOURCES_FS, 0, 0},
	{R_0288DC_SQ_PGM_CF_OFFSET_FS, 0, 0},
	{R_028644_SPI_PS_INPUT_CNTL_0, 0, 0},
	{R_028648_SPI_PS_INPUT_CNTL_1, 0, 0},
	{R_02864C_SPI_PS_INPUT_CNTL_2, 0, 0},
	{R_028650_SPI_PS_INPUT_CNTL_3, 0, 0},
	{R_028654_SPI_PS_INPUT_CNTL_4, 0, 0},
	{R_028658_SPI_PS_INPUT_CNTL_5, 0, 0},
	{R_02865C_SPI_PS_INPUT_CNTL_6, 0, 0},
	{R_028660_SPI_PS_INPUT_CNTL_7, 0, 0},
	{R_028664_SPI_PS_INPUT_CNTL_8, 0, 0},
	{R_028668_SPI_PS_INPUT_CNTL_9, 0, 0},
	{R_02866C_SPI_PS_INPUT_CNTL_10, 0, 0},
	{R_028670_SPI_PS_INPUT_CNTL_11, 0, 0},
	{R_028674_SPI_PS_INPUT_CNTL_12, 0, 0},
	{R_028678_SPI_PS_INPUT_CNTL_13, 0, 0},
	{R_02867C_SPI_PS_INPUT_CNTL_14, 0, 0},
	{R_028680_SPI_PS_INPUT_CNTL_15, 0, 0},
	{R_028684_SPI_PS_INPUT_CNTL_16, 0, 0},
	{R_028688_SPI_PS_INPUT_CNTL_17, 0, 0},
	{R_02868C_SPI_PS_INPUT_CNTL_18, 0, 0},
	{R_028690_SPI_PS_INPUT_CNTL_19, 0, 0},
	{R_028694_SPI_PS_INPUT_CNTL_20, 0, 0},
	{R_028698_SPI_PS_INPUT_CNTL_21, 0, 0},
	{R_02869C_SPI_PS_INPUT_CNTL_22, 0, 0},
	{R_0286A0_SPI_PS_INPUT_CNTL_23, 0, 0},
	{R_0286A4_SPI_PS_INPUT_CNTL_24, 0, 0},
	{R_0286A8_SPI_PS_INPUT_CNTL_25, 0, 0},
	{R_0286AC_SPI_PS_INPUT_CNTL_26, 0, 0},
	{R_0286B0_SPI_PS_INPUT_CNTL_27, 0, 0},
	{R_0286B4_SPI_PS_INPUT_CNTL_28, 0, 0},
	{R_0286B8_SPI_PS_INPUT_CNTL_29, 0, 0},
	{R_0286BC_SPI_PS_INPUT_CNTL_30, 0, 0},
	{R_0286C0_SPI_PS_INPUT_CNTL_31, 0, 0},
	{R_0286CC_SPI_PS_IN_CONTROL_0, 0, 0},
	{R_0286D0_SPI_PS_IN_CONTROL_1, 0, 0},
	{R_0286D8_SPI_INPUT_Z, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028840_SQ_PGM_START_PS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028850_SQ_PGM_RESOURCES_PS, 0, 0},
	{R_028854_SQ_PGM_EXPORTS_PS, 0, 0},
	{R_028408_VGT_INDX_OFFSET, 0, 0},
	{R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, 0, 0},
	{R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, 0, 0},
};

/* SHADER RESOURCE R600/R700 */
int r600_resource_init(struct r600_context *ctx, struct r600_range *range, unsigned offset, unsigned nblocks, unsigned stride, struct r600_reg *reg, int nreg, unsigned offset_base)
{
	int i;
	struct r600_block *block;
	range->blocks = calloc(nblocks, sizeof(struct r600_block *));
	if (range->blocks == NULL)
		return -ENOMEM;

	reg[0].offset += offset;
	for (i = 0; i < nblocks; i++) {
		block = calloc(1, sizeof(struct r600_block));
		if (block == NULL) {
			return -ENOMEM;
		}
		ctx->nblocks++;
		range->blocks[i] = block;
		r600_init_block(ctx, block, reg, 0, nreg, PKT3_SET_RESOURCE, offset_base);

		reg[0].offset += stride;
	}
	return 0;
}


static int r600_resource_range_init(struct r600_context *ctx, struct r600_range *range, unsigned offset, unsigned nblocks, unsigned stride)
{
	struct r600_reg r600_shader_resource[] = {
		{R_038000_RESOURCE0_WORD0, REG_FLAG_NEED_BO, 0},
		{R_038004_RESOURCE0_WORD1, REG_FLAG_NEED_BO, 0},
		{R_038008_RESOURCE0_WORD2, 0, 0},
		{R_03800C_RESOURCE0_WORD3, 0, 0},
		{R_038010_RESOURCE0_WORD4, 0, 0},
		{R_038014_RESOURCE0_WORD5, 0, 0},
		{R_038018_RESOURCE0_WORD6, 0, 0},
	};
	unsigned nreg = Elements(r600_shader_resource);

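	/* each slot becomes one 7-word SET_RESOURCE block; consecutive slots
	 * sit 'stride' bytes apart (r600_resource_init advances the offset)
	 */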
	return r600_resource_init(ctx, range, offset, nblocks, stride, r600_shader_resource, nreg, R600_RESOURCE_OFFSET);
}

/* SHADER SAMPLER R600/R700/EG/CM */
int r600_state_sampler_init(struct r600_context *ctx, uint32_t offset)
{
	struct r600_reg r600_shader_sampler[] = {
		{R_03C000_SQ_TEX_SAMPLER_WORD0_0, 0, 0},
		{R_03C004_SQ_TEX_SAMPLER_WORD1_0, 0, 0},
		{R_03C008_SQ_TEX_SAMPLER_WORD2_0, 0, 0},
	};
	unsigned nreg = Elements(r600_shader_sampler);

	for (int i = 0; i < nreg; i++) {
		r600_shader_sampler[i].offset += offset;
	}
	return r600_context_add_block(ctx, r600_shader_sampler, nreg, PKT3_SET_SAMPLER, R600_SAMPLER_OFFSET);
}

/* SHADER SAMPLER BORDER R600/R700 */
static int r600_state_sampler_border_init(struct r600_context *ctx, uint32_t offset)
{
	struct r600_reg r600_shader_sampler_border[] = {
		{R_00A400_TD_PS_SAMPLER0_BORDER_RED, 0, 0},
		{R_00A404_TD_PS_SAMPLER0_BORDER_GREEN, 0, 0},
		{R_00A408_TD_PS_SAMPLER0_BORDER_BLUE, 0, 0},
		{R_00A40C_TD_PS_SAMPLER0_BORDER_ALPHA, 0, 0},
	};
	unsigned nreg = Elements(r600_shader_sampler_border);

	for (int i = 0; i < nreg; i++) {
		r600_shader_sampler_border[i].offset += offset;
	}
	return r600_context_add_block(ctx, r600_shader_sampler_border, nreg, PKT3_SET_CONFIG_REG, R600_CONFIG_REG_OFFSET);
}

static int r600_loop_const_init(struct r600_context *ctx, uint32_t offset)
{
	unsigned nreg = 32;
	struct r600_reg r600_loop_consts[32];
	int i;

	for (i = 0; i < nreg; i++) {
		r600_loop_consts[i].offset = R600_LOOP_CONST_OFFSET + ((offset + i) * 4);
		r600_loop_consts[i].flags = REG_FLAG_DIRTY_ALWAYS;
		r600_loop_consts[i].sbu_flags = 0;
	}
	return r600_context_add_block(ctx, r600_loop_consts, nreg, PKT3_SET_LOOP_CONST, R600_LOOP_CONST_OFFSET);
}

static void r600_free_resource_range(struct r600_context *ctx, struct r600_range *range, int nblocks)
{
	struct r600_block *block;
	int i;

	if (!range->blocks) {
		return; /* nothing to do */
	}

	for (i = 0; i < nblocks; i++) {
		block = range->blocks[i];
		if (block) {
			for (int k = 1; k <= block->nbo; k++)
				pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
			free(block);
		}
	}
	free(range->blocks);
}

/* tear down */
void r600_context_fini(struct r600_context *ctx)
{
	struct r600_block *block;
	struct r600_range *range;

	if (ctx->range) {
		for (int i = 0; i < NUM_RANGES; i++) {
			if (!ctx->range[i].blocks)
				continue;
			for (int j = 0; j < (1 << HASH_SHIFT); j++) {
				block = ctx->range[i].blocks[j];
				if (block) {
					for (int k = 0, offset = block->start_offset; k < block->nreg; k++, offset += 4) {
						range = &ctx->range[CTX_RANGE_ID(offset)];
						range->blocks[CTX_BLOCK_ID(offset)] = NULL;
					}
					for (int k = 1; k <= block->nbo; k++) {
						pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
					}
					free(block);
				}
			}
			free(ctx->range[i].blocks);
		}
	}
	r600_free_resource_range(ctx, &ctx->ps_resources, ctx->num_ps_resources);
	r600_free_resource_range(ctx, &ctx->vs_resources, ctx->num_vs_resources);
	r600_free_resource_range(ctx, &ctx->fs_resources, ctx->num_fs_resources);
	free(ctx->blocks);
}

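/* append every non-NULL block of a resource range to the flat block table */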
static void r600_add_resource_block(struct r600_context *ctx, struct r600_range *range, int num_blocks, int *index)
{
	int c = *index;
	for (int j = 0; j < num_blocks; j++) {
		if (!range->blocks[j])
			continue;

		ctx->blocks[c++] = range->blocks[j];
	}
	*index = c;
}

int r600_setup_block_table(struct r600_context *ctx)
{
	/* setup the block table */
	int c = 0;
	ctx->blocks = calloc(ctx->nblocks, sizeof(void*));
	if (!ctx->blocks)
		return -ENOMEM;
	for (int i = 0; i < NUM_RANGES; i++) {
		if (!ctx->range[i].blocks)
			continue;
		for (int j = 0, add; j < (1 << HASH_SHIFT); j++) {
			if (!ctx->range[i].blocks[j])
				continue;

			add = 1;
			for (int k = 0; k < c; k++) {
				if (ctx->blocks[k] == ctx->range[i].blocks[j]) {
					add = 0;
					break;
				}
			}
			if (add) {
				assert(c < ctx->nblocks);
				ctx->blocks[c++] = ctx->range[i].blocks[j];
				j += (ctx->range[i].blocks[j]->nreg) - 1;
			}
		}
	}

	r600_add_resource_block(ctx, &ctx->ps_resources, ctx->num_ps_resources, &c);
	r600_add_resource_block(ctx, &ctx->vs_resources, ctx->num_vs_resources, &c);
	r600_add_resource_block(ctx, &ctx->fs_resources, ctx->num_fs_resources, &c);
	return 0;
}

int r600_context_init(struct r600_context *ctx)
{
	int r;

	/* add blocks */
	r = r600_context_add_block(ctx, r600_config_reg_list,
				   Elements(r600_config_reg_list), PKT3_SET_CONFIG_REG, R600_CONFIG_REG_OFFSET);
	if (r)
		goto out_err;
	r = r600_context_add_block(ctx, r600_context_reg_list,
				   Elements(r600_context_reg_list), PKT3_SET_CONTEXT_REG, R600_CONTEXT_REG_OFFSET);
	if (r)
		goto out_err;
	r = r600_context_add_block(ctx, r600_ctl_const_list,
				   Elements(r600_ctl_const_list), PKT3_SET_CTL_CONST, R600_CTL_CONST_OFFSET);
	if (r)
		goto out_err;

	/* PS SAMPLER BORDER */
	for (int j = 0, offset = 0; j < 18; j++, offset += 0x10) {
		r = r600_state_sampler_border_init(ctx, offset);
		if (r)
			goto out_err;
	}

	/* VS SAMPLER BORDER */
	for (int j = 0, offset = 0x200; j < 18; j++, offset += 0x10) {
		r = r600_state_sampler_border_init(ctx, offset);
		if (r)
			goto out_err;
	}
	/* PS SAMPLER */
	for (int j = 0, offset = 0; j < 18; j++, offset += 0xC) {
		r = r600_state_sampler_init(ctx, offset);
		if (r)
			goto out_err;
	}
	/* VS SAMPLER */
	for (int j = 0, offset = 0xD8; j < 18; j++, offset += 0xC) {
		r = r600_state_sampler_init(ctx, offset);
		if (r)
			goto out_err;
	}

	ctx->num_ps_resources = 160;
	ctx->num_vs_resources = 160;
	ctx->num_fs_resources = 16;
	r = r600_resource_range_init(ctx, &ctx->ps_resources, 0, 160, 0x1c);
	if (r)
		goto out_err;
	r = r600_resource_range_init(ctx, &ctx->vs_resources, 0x1180, 160, 0x1c);
	if (r)
		goto out_err;
	r = r600_resource_range_init(ctx, &ctx->fs_resources, 0x2300, 16, 0x1c);
	if (r)
		goto out_err;

	/* PS loop const */
	r600_loop_const_init(ctx, 0);
	/* VS loop const */
	r600_loop_const_init(ctx, 32);

	r = r600_setup_block_table(ctx);
	if (r)
		goto out_err;

	ctx->max_db = 4;
	return 0;
out_err:
	r600_context_fini(ctx);
	return r;
}

void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			boolean count_draw_in)
{
	struct r600_atom *state;

	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->cs->cdw;

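	/* Conservatively account for everything that may still be appended
	 * before this CS is flushed. */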
	if (count_draw_in) {
		/* The number of dwords all the dirty states would take. */
		LIST_FOR_EACH_ENTRY(state, &ctx->dirty_states, head) {
			num_dw += state->num_dw;
		}

		num_dw += ctx->pm4_dirty_cdwords;

		/* The upper-bound of how much a draw command would take. */
		num_dw += R600_MAX_DRAW_CS_DWORDS;
	}

	/* Count in queries_suspend. */
	num_dw += ctx->num_cs_dw_nontimer_queries_suspend;
	num_dw += ctx->num_cs_dw_timer_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	num_dw += ctx->num_cs_dw_streamout_end;

	/* Count in render_condition(NULL) at the end of CS. */
	if (ctx->predicate_drawing) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += 7; /* one SURFACE_SYNC and CACHE_FLUSH_AND_INV (r6xx-only) */

	/* Save 16 dwords for the fence mechanism. */
	num_dw += 16;

	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		r600_flush(&ctx->context, NULL, RADEON_FLUSH_ASYNC);
	}
}

void r600_context_dirty_block(struct r600_context *ctx,
			      struct r600_block *block,
			      int dirty, int index)
{
	if ((index + 1) > block->nreg_dirty)
		block->nreg_dirty = index + 1;

	if ((dirty != (block->status & R600_BLOCK_STATUS_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
		block->status |= R600_BLOCK_STATUS_DIRTY;
		ctx->pm4_dirty_cdwords += block->pm4_ndwords;
		if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
			block->status |= R600_BLOCK_STATUS_ENABLED;
			LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
		}
		LIST_ADDTAIL(&block->list, &ctx->dirty);

		if (block->flags & REG_FLAG_FLUSH_CHANGE) {
			r600_context_ps_partial_flush(ctx);
		}
	}
}

void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state)
{
	struct r600_block *block;
	int dirty;
	for (int i = 0; i < state->nregs; i++) {
		unsigned id, reloc_id;
		struct r600_pipe_reg *reg = &state->regs[i];

		block = reg->block;
		id = reg->id;

		dirty = block->status & R600_BLOCK_STATUS_DIRTY;

		if (reg->value != block->reg[id]) {
			block->reg[id] = reg->value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
		if (block->flags & REG_FLAG_DIRTY_ALWAYS)
			dirty |= R600_BLOCK_STATUS_DIRTY;
		if (block->pm4_bo_index[id]) {
			/* find relocation */
			reloc_id = block->pm4_bo_index[id];
			pipe_resource_reference((struct pipe_resource**)&block->reloc[reloc_id].bo, &reg->bo->b.b.b);
			block->reloc[reloc_id].bo_usage = reg->bo_usage;
			/* always force dirty for relocs for now */
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}

		if (dirty)
			r600_context_dirty_block(ctx, block, dirty, id);
	}
}

static void r600_context_dirty_resource_block(struct r600_context *ctx,
					      struct r600_block *block,
					      int dirty, int index)
{
	block->nreg_dirty = index + 1;

	if ((dirty != (block->status & R600_BLOCK_STATUS_RESOURCE_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
		block->status |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
		ctx->pm4_dirty_cdwords += block->pm4_ndwords;
		if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
			block->status |= R600_BLOCK_STATUS_ENABLED;
			LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
		}
		LIST_ADDTAIL(&block->list, &ctx->resource_dirty);
	}
}

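/* A resource block covers both the vertex and the texture SET_RESOURCE
 * layout; passing state == NULL disables the block and drops its BO
 * references.
 */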
void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, struct r600_block *block)
{
	int dirty;
	int num_regs = ctx->chip_class >= EVERGREEN ? 8 : 7;
	boolean is_vertex;

	if (state == NULL) {
		block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_RESOURCE_DIRTY);
		pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, NULL);
		pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, NULL);
		LIST_DELINIT(&block->list);
		LIST_DELINIT(&block->enable_list);
		return;
	}

	is_vertex = ((state->val[num_regs-1] & 0xc0000000) == 0xc0000000);
	dirty = block->status & R600_BLOCK_STATUS_RESOURCE_DIRTY;

	if (memcmp(block->reg, state->val, num_regs*4)) {
		memcpy(block->reg, state->val, num_regs * 4);
		dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
	}

	/* if there are no BOs on the block, force dirty */
	if (!block->reloc[1].bo || !block->reloc[2].bo)
		dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;

	if (!dirty) {
		if (is_vertex) {
			if (block->reloc[1].bo->buf != state->bo[0]->buf)
				dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
		} else {
			if ((block->reloc[1].bo->buf != state->bo[0]->buf) ||
			    (block->reloc[2].bo->buf != state->bo[1]->buf))
				dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
		}
	}

	if (dirty) {
		if (is_vertex) {
			/* VERTEX RESOURCE: we pretend there are 2 BOs to relocate, so
			 * we have a single code path for both VERTEX and TEXTURE resources
			 */
			pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, &state->bo[0]->b.b.b);
			block->reloc[1].bo_usage = state->bo_usage[0];
			pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, NULL);
		} else {
			/* TEXTURE RESOURCE */
			pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, &state->bo[0]->b.b.b);
			block->reloc[1].bo_usage = state->bo_usage[0];
			pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, &state->bo[1]->b.b.b);
			block->reloc[2].bo_usage = state->bo_usage[1];
		}

		if (is_vertex)
			block->status |= R600_BLOCK_STATUS_RESOURCE_VERTEX;
		else
			block->status &= ~R600_BLOCK_STATUS_RESOURCE_VERTEX;

		r600_context_dirty_resource_block(ctx, block, dirty, num_regs - 1);
	}
}

void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid)
{
	struct r600_block *block = ctx->ps_resources.blocks[rid];

	r600_context_pipe_state_set_resource(ctx, state, block);
}

void r600_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid)
{
	struct r600_block *block = ctx->vs_resources.blocks[rid];

	r600_context_pipe_state_set_resource(ctx, state, block);
}

void r600_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid)
{
	struct r600_block *block = ctx->fs_resources.blocks[rid];

	r600_context_pipe_state_set_resource(ctx, state, block);
}

void r600_context_pipe_state_set_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
	struct r600_range *range;
	struct r600_block *block;
	int i;
	int dirty;

	range = &ctx->range[CTX_RANGE_ID(offset)];
	block = range->blocks[CTX_BLOCK_ID(offset)];
	if (state == NULL) {
		block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
		LIST_DELINIT(&block->list);
		LIST_DELINIT(&block->enable_list);
		return;
	}
	dirty = block->status & R600_BLOCK_STATUS_DIRTY;

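	/* a sampler is 3 consecutive SQ_TEX_SAMPLER_WORD registers */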
	for (i = 0; i < 3; i++) {
		if (block->reg[i] != state->regs[i].value) {
			block->reg[i] = state->regs[i].value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
	}

	if (dirty)
		r600_context_dirty_block(ctx, block, dirty, 2);
}

static inline void r600_context_pipe_state_set_sampler_border(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
	struct r600_range *range;
	struct r600_block *block;
	int i;
	int dirty;

	range = &ctx->range[CTX_RANGE_ID(offset)];
	block = range->blocks[CTX_BLOCK_ID(offset)];
	if (state == NULL) {
		block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
		LIST_DELINIT(&block->list);
		LIST_DELINIT(&block->enable_list);
		return;
	}
	if (state->nregs <= 3) {
		return;
	}
	dirty = block->status & R600_BLOCK_STATUS_DIRTY;
	for (i = 0; i < 4; i++) {
		if (block->reg[i] != state->regs[i + 3].value) {
			block->reg[i] = state->regs[i + 3].value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
	}

	/* We have to flush the shaders before we change the border color
	 * registers, or previous draw commands that haven't completed yet
	 * will end up using the new border color. */
	if (dirty & R600_BLOCK_STATUS_DIRTY)
		r600_context_ps_partial_flush(ctx);
	if (dirty)
		r600_context_dirty_block(ctx, block, dirty, 3);
}

void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id)
{
	unsigned offset;

	offset = R_03C000_SQ_TEX_SAMPLER_WORD0_0 + 12*id;
	r600_context_pipe_state_set_sampler(ctx, state, offset);
	offset = R_00A400_TD_PS_SAMPLER0_BORDER_RED + 16*id;
	r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}

void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id)
{
	unsigned offset;

	offset = R_03C000_SQ_TEX_SAMPLER_WORD0_0 + 12*(id + 18);
	r600_context_pipe_state_set_sampler(ctx, state, offset);
	offset = R_00A600_TD_VS_SAMPLER0_BORDER_RED + 16*id;
	r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}

void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int optional = block->nbo == 0 && !(block->flags & REG_FLAG_DIRTY_ALWAYS);
	int cp_dwords = block->pm4_ndwords, start_dword = 0;
	int new_dwords = 0;
	int nbo = block->nbo;

	if (block->nreg_dirty == 0 && optional) {
		goto out;
	}

	if (nbo) {
		for (int j = 0; j < block->nreg; j++) {
			if (block->pm4_bo_index[j]) {
				/* find relocation */
				struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
				if (reloc->bo) {
					block->pm4[reloc->bo_pm4_index] =
						r600_context_bo_reloc(ctx, reloc->bo, reloc->bo_usage);
				} else {
					block->pm4[reloc->bo_pm4_index] = 0;
				}
				nbo--;
				if (nbo == 0)
					break;

			}
		}
	}

	optional &= (block->nreg_dirty != block->nreg);
	if (optional) {
		new_dwords = block->nreg_dirty;
		start_dword = cs->cdw;
		cp_dwords = new_dwords + 2;
	}
	memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);
	cs->cdw += cp_dwords;

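	/* if only a prefix of the block was dirty, patch the copied PKT3
	 * header so its count matches the shortened payload
	 */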
	if (optional) {
		uint32_t newword;

		newword = cs->buf[start_dword];
		newword &= PKT_COUNT_C;
		newword |= PKT_COUNT_S(new_dwords);
		cs->buf[start_dword] = newword;
	}
out:
	block->status ^= R600_BLOCK_STATUS_DIRTY;
	block->nreg_dirty = 0;
	LIST_DELINIT(&block->list);
}

void r600_context_block_resource_emit_dirty(struct r600_context *ctx, struct r600_block *block)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int cp_dwords = block->pm4_ndwords;
	int nbo = block->nbo;

	if (block->status & R600_BLOCK_STATUS_RESOURCE_VERTEX) {
		nbo = 1;
		cp_dwords -= 2; /* don't copy the second NOP */
	}

	for (int j = 0; j < nbo; j++) {
		if (block->pm4_bo_index[j]) {
			/* find relocation */
			struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
			block->pm4[reloc->bo_pm4_index] =
				r600_context_bo_reloc(ctx, reloc->bo, reloc->bo_usage);
		}
	}

	memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);
	cs->cdw += cp_dwords;

	block->status ^= R600_BLOCK_STATUS_RESOURCE_DIRTY;
	block->nreg_dirty = 0;
	LIST_DELINIT(&block->list);
}

void r600_inval_shader_cache(struct r600_context *ctx)
{
	ctx->surface_sync_cmd.flush_flags |= S_0085F0_SH_ACTION_ENA(1);
	r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);
}

void r600_inval_texture_cache(struct r600_context *ctx)
{
	ctx->surface_sync_cmd.flush_flags |= S_0085F0_TC_ACTION_ENA(1);
	r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);
}

void r600_inval_vertex_cache(struct r600_context *ctx)
{
	if (ctx->has_vertex_cache) {
		ctx->surface_sync_cmd.flush_flags |= S_0085F0_VC_ACTION_ENA(1);
	} else {
		/* Some GPUs don't have the vertex cache and must use the texture cache instead. */
		ctx->surface_sync_cmd.flush_flags |= S_0085F0_TC_ACTION_ENA(1);
	}
	r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);
}

void r600_flush_framebuffer(struct r600_context *ctx, bool flush_now)
{
	if (!(ctx->flags & R600_CONTEXT_DST_CACHES_DIRTY))
		return;

	ctx->surface_sync_cmd.flush_flags |=
		r600_get_cb_flush_flags(ctx) |
		(ctx->framebuffer.zsbuf ? S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1) : 0);

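	/* either emit the SURFACE_SYNC immediately or just mark its atom
	 * dirty so it goes out with the next draw
	 */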
	if (flush_now) {
		r600_emit_atom(ctx, &ctx->surface_sync_cmd.atom);
	} else {
		r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);
	}

	/* Also add a complete cache flush to work around broken flushing on R6xx. */
	if (ctx->chip_class == R600) {
		if (flush_now) {
			r600_emit_atom(ctx, &ctx->r6xx_flush_and_inv_cmd);
		} else {
			r600_atom_dirty(ctx, &ctx->r6xx_flush_and_inv_cmd);
		}
	}

	ctx->flags &= ~R600_CONTEXT_DST_CACHES_DIRTY;
}

void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	struct r600_block *enable_block = NULL;
	bool timer_queries_suspended = false;
	bool nontimer_queries_suspended = false;
	bool streamout_suspended = false;

	if (cs->cdw == ctx->start_cs_cmd.atom.num_dw)
		return;

	/* suspend queries */
	if (ctx->num_cs_dw_timer_queries_suspend) {
		r600_suspend_timer_queries(ctx);
		timer_queries_suspended = true;
	}
	if (ctx->num_cs_dw_nontimer_queries_suspend) {
		r600_suspend_nontimer_queries(ctx);
		nontimer_queries_suspended = true;
	}

	if (ctx->num_cs_dw_streamout_end) {
		r600_context_streamout_end(ctx);
		streamout_suspended = true;
	}

	r600_flush_framebuffer(ctx, true);

	/* a partial flush is needed to avoid lockups on some chips with user fences */
	r600_context_ps_partial_flush(ctx);

	/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
	if (ctx->chip_class <= R700) {
		r600_write_context_reg(cs, R_028350_SX_MISC, 0);
	}

	/* force to keep tiling flags */
	flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

	/* Flush the CS. */
	ctx->ws->cs_flush(ctx->cs, flags);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);
	r600_atom_dirty(ctx, &ctx->db_misc_state.atom);

	if (streamout_suspended) {
		ctx->streamout_start = TRUE;
		ctx->streamout_append_bitmask = ~0;
	}

	/* resume queries */
	if (timer_queries_suspended) {
		r600_resume_timer_queries(ctx);
	}
	if (nontimer_queries_suspended) {
		r600_resume_nontimer_queries(ctx);
	}

	/* mark all enabled groups as dirty so they get re-emitted on
	 * the next draw command
	 */
	LIST_FOR_EACH_ENTRY(enable_block, &ctx->enable_list, enable_list) {
		if (!(enable_block->flags & BLOCK_FLAG_RESOURCE)) {
			if (!(enable_block->status & R600_BLOCK_STATUS_DIRTY)) {
				LIST_ADDTAIL(&enable_block->list, &ctx->dirty);
				enable_block->status |= R600_BLOCK_STATUS_DIRTY;
			}
		} else {
			if (!(enable_block->status & R600_BLOCK_STATUS_RESOURCE_DIRTY)) {
				LIST_ADDTAIL(&enable_block->list, &ctx->resource_dirty);
				enable_block->status |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
			}
		}
		ctx->pm4_dirty_cdwords += enable_block->pm4_ndwords;
		enable_block->nreg_dirty = enable_block->nreg;
	}
}

void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	r600_need_cs_space(ctx, 10, FALSE);

	va = r600_resource_va(&ctx->screen->screen, (void*)fence_bo);
	va = va + (offset << 2);

	r600_context_ps_partial_flush(ctx);

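	/* EVENT_WRITE_EOP: once all prior work reaches end-of-pipe, the CP
	 * writes the 32-bit 'value' to the fence BO at 'va' (DATA_SEL = 1,
	 * interrupts disabled)
	 */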
	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
	cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* ADDRESS_LO */
	/* DATA_SEL | INT_EN | ADDRESS_HI */
	cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
	cs->buf[cs->cdw++] = value; /* DATA_LO */
	cs->buf[cs->cdw++] = 0; /* DATA_HI */
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, fence_bo, RADEON_USAGE_WRITE);
}

static void r600_flush_vgt_streamout(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONFIG_REG, 1, 0);
	cs->buf[cs->cdw++] = (R_008490_CP_STRMOUT_CNTL - R600_CONFIG_REG_OFFSET) >> 2;
	cs->buf[cs->cdw++] = 0;

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);

	cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
	cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
	cs->buf[cs->cdw++] = R_008490_CP_STRMOUT_CNTL >> 2; /* register */
	cs->buf[cs->cdw++] = 0;
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* reference value */
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* mask */
	cs->buf[cs->cdw++] = 4; /* poll interval */
}

static void r600_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	if (buffer_enable_bit) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
		cs->buf[cs->cdw++] = (R_028AB0_VGT_STRMOUT_EN - R600_CONTEXT_REG_OFFSET) >> 2;
		cs->buf[cs->cdw++] = S_028AB0_STREAMOUT(1);

		cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
		cs->buf[cs->cdw++] = (R_028B20_VGT_STRMOUT_BUFFER_EN - R600_CONTEXT_REG_OFFSET) >> 2;
		cs->buf[cs->cdw++] = buffer_enable_bit;
	} else {
		cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
		cs->buf[cs->cdw++] = (R_028AB0_VGT_STRMOUT_EN - R600_CONTEXT_REG_OFFSET) >> 2;
		cs->buf[cs->cdw++] = S_028AB0_STREAMOUT(0);
	}
}

void r600_context_streamout_begin(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	struct r600_so_target **t = ctx->so_targets;
	unsigned *stride_in_dw = ctx->vs_shader->so.stride;
	unsigned buffer_en, i, update_flags = 0;
	uint64_t va;

	buffer_en = (ctx->num_so_targets >= 1 && t[0] ? 1 : 0) |
		    (ctx->num_so_targets >= 2 && t[1] ? 2 : 0) |
		    (ctx->num_so_targets >= 3 && t[2] ? 4 : 0) |
		    (ctx->num_so_targets >= 4 && t[3] ? 8 : 0);

	ctx->num_cs_dw_streamout_end =
		12 + /* flush_vgt_streamout */
		util_bitcount(buffer_en) * 8 +
		3;

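	/* reserve space up front so the packets below and the matching
	 * streamout_end sequence cannot be split across a CS flush
	 */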
	r600_need_cs_space(ctx,
			   12 + /* flush_vgt_streamout */
			   6 + /* enables */
			   util_bitcount(buffer_en & ctx->streamout_append_bitmask) * 8 +
			   util_bitcount(buffer_en & ~ctx->streamout_append_bitmask) * 6 +
			   (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770 ? 2 : 0) +
			   ctx->num_cs_dw_streamout_end, TRUE);

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
		evergreen_set_streamout_enable(ctx, buffer_en);
	} else {
		r600_flush_vgt_streamout(ctx);
		r600_set_streamout_enable(ctx, buffer_en);
	}

	for (i = 0; i < ctx->num_so_targets; i++) {
		if (t[i]) {
			t[i]->stride_in_dw = stride_in_dw[i];
			t[i]->so_index = i;
			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->b.buffer);

			update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);

			cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 3, 0);
			cs->buf[cs->cdw++] = (R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 +
					      16*i - R600_CONTEXT_REG_OFFSET) >> 2;
			cs->buf[cs->cdw++] = (t[i]->b.buffer_offset +
					      t[i]->b.buffer_size) >> 2; /* BUFFER_SIZE (in DW) */
			cs->buf[cs->cdw++] = stride_in_dw[i]; /* VTX_STRIDE (in DW) */
			cs->buf[cs->cdw++] = va >> 8; /* BUFFER_BASE */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, r600_resource(t[i]->b.buffer),
						      RADEON_USAGE_WRITE);

			if (ctx->streamout_append_bitmask & (1 << i)) {
				va = r600_resource_va(&ctx->screen->screen,
						      (void*)t[i]->filled_size);
				/* Append. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* src address lo */
				cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */

				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] =
					r600_context_bo_reloc(ctx, t[i]->filled_size,
							      RADEON_USAGE_READ);
			} else {
				/* Start from the beginning. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = t[i]->b.buffer_offset >> 2; /* buffer offset in DW */
				cs->buf[cs->cdw++] = 0; /* unused */
			}
		}
	}

	if (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
		cs->buf[cs->cdw++] = update_flags;
	}
}

void r600_context_streamout_end(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	struct r600_so_target **t = ctx->so_targets;
	unsigned i, flush_flags = 0;
	uint64_t va;

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
	} else {
		r600_flush_vgt_streamout(ctx);
	}

	for (i = 0; i < ctx->num_so_targets; i++) {
		if (t[i]) {
			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->filled_size);
			cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
			cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
					     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
					     STRMOUT_STORE_BUFFER_FILLED_SIZE; /* control */
			cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* dst address lo */
			cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* dst address hi */
			cs->buf[cs->cdw++] = 0; /* unused */
			cs->buf[cs->cdw++] = 0; /* unused */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, t[i]->filled_size,
						      RADEON_USAGE_WRITE);

			flush_flags |= S_0085F0_SO0_DEST_BASE_ENA(1) << i;
		}
	}

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_set_streamout_enable(ctx, 0);
	} else {
		r600_set_streamout_enable(ctx, 0);
	}

	/* This is needed to fix cache flushes on r600. */
	if (ctx->chip_class == R600) {
		if (ctx->family == CHIP_RV670 ||
		    ctx->family == CHIP_RS780 ||
		    ctx->family == CHIP_RS880) {
			flush_flags |= S_0085F0_DEST_BASE_0_ENA(1);
		}

		r600_atom_dirty(ctx, &ctx->r6xx_flush_and_inv_cmd);
	}

	/* Flush streamout caches. */
	ctx->surface_sync_cmd.flush_flags |= flush_flags;
	r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);

	ctx->num_cs_dw_streamout_end = 0;

#if 0
	for (i = 0; i < ctx->num_so_targets; i++) {
		if (!t[i])
			continue;

		uint32_t *ptr = ctx->ws->buffer_map(t[i]->filled_size->buf, ctx->cs, RADEON_USAGE_READ);
		printf("FILLED_SIZE%i: %u\n", i, *ptr);
		ctx->ws->buffer_unmap(t[i]->filled_size->buf);
	}
#endif
}

void r600_context_draw_opaque_count(struct r600_context *ctx, struct r600_so_target *t)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va = r600_resource_va(&ctx->screen->screen,
				       (void*)t->filled_size);

	r600_need_cs_space(ctx, 14 + 21, TRUE);

	cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
	cs->buf[cs->cdw++] = (R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET - R600_CONTEXT_REG_OFFSET) >> 2;
	cs->buf[cs->cdw++] = 0;

	cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
	cs->buf[cs->cdw++] = (R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE - R600_CONTEXT_REG_OFFSET) >> 2;
	cs->buf[cs->cdw++] = t->stride_in_dw;

	cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);
	cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG;
	cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* src address lo */
	cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */
	cs->buf[cs->cdw++] = R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2; /* dst register */
	cs->buf[cs->cdw++] = 0; /* unused */

	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, t->filled_size, RADEON_USAGE_READ);
}