r300_state.c revision daffaca53e47faeaaefb98ca46fe4870133d9f02
1/* 2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com> 3 * Copyright 2009 Marek Olšák <maraeo@gmail.com> 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * on the rights to use, copy, modify, merge, publish, distribute, sub 9 * license, and/or sell copies of the Software, and to permit persons to whom 10 * the Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, 20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */ 23 24#include "draw/draw_context.h" 25 26#include "util/u_framebuffer.h" 27#include "util/u_math.h" 28#include "util/u_mm.h" 29#include "util/u_memory.h" 30#include "util/u_pack_color.h" 31 32#include "tgsi/tgsi_parse.h" 33 34#include "pipe/p_config.h" 35 36#include "r300_cb.h" 37#include "r300_context.h" 38#include "r300_emit.h" 39#include "r300_reg.h" 40#include "r300_screen.h" 41#include "r300_screen_buffer.h" 42#include "r300_state_inlines.h" 43#include "r300_fs.h" 44#include "r300_texture.h" 45#include "r300_vs.h" 46#include "r300_winsys.h" 47#include "r300_hyperz.h" 48 49/* r300_state: Functions used to intialize state context by translating 50 * Gallium state objects into semi-native r300 state objects. */ 51 52#define UPDATE_STATE(cso, atom) \ 53 if (cso != atom.state) { \ 54 atom.state = cso; \ 55 r300_mark_atom_dirty(r300, &(atom)); \ 56 } 57 58static boolean blend_discard_if_src_alpha_0(unsigned srcRGB, unsigned srcA, 59 unsigned dstRGB, unsigned dstA) 60{ 61 /* If the blend equation is ADD or REVERSE_SUBTRACT, 62 * SRC_ALPHA == 0, and the following state is set, the colorbuffer 63 * will not be changed. 64 * Notice that the dst factors are the src factors inverted. */ 65 return (srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA || 66 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE || 67 srcRGB == PIPE_BLENDFACTOR_ZERO) && 68 (srcA == PIPE_BLENDFACTOR_SRC_COLOR || 69 srcA == PIPE_BLENDFACTOR_SRC_ALPHA || 70 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE || 71 srcA == PIPE_BLENDFACTOR_ZERO) && 72 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 73 dstRGB == PIPE_BLENDFACTOR_ONE) && 74 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR || 75 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 76 dstA == PIPE_BLENDFACTOR_ONE); 77} 78 79static boolean blend_discard_if_src_alpha_1(unsigned srcRGB, unsigned srcA, 80 unsigned dstRGB, unsigned dstA) 81{ 82 /* If the blend equation is ADD or REVERSE_SUBTRACT, 83 * SRC_ALPHA == 1, and the following state is set, the colorbuffer 84 * will not be changed. 85 * Notice that the dst factors are the src factors inverted. 
*/ 86 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 87 srcRGB == PIPE_BLENDFACTOR_ZERO) && 88 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR || 89 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 90 srcA == PIPE_BLENDFACTOR_ZERO) && 91 (dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA || 92 dstRGB == PIPE_BLENDFACTOR_ONE) && 93 (dstA == PIPE_BLENDFACTOR_SRC_COLOR || 94 dstA == PIPE_BLENDFACTOR_SRC_ALPHA || 95 dstA == PIPE_BLENDFACTOR_ONE); 96} 97 98static boolean blend_discard_if_src_color_0(unsigned srcRGB, unsigned srcA, 99 unsigned dstRGB, unsigned dstA) 100{ 101 /* If the blend equation is ADD or REVERSE_SUBTRACT, 102 * SRC_COLOR == (0,0,0), and the following state is set, the colorbuffer 103 * will not be changed. 104 * Notice that the dst factors are the src factors inverted. */ 105 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR || 106 srcRGB == PIPE_BLENDFACTOR_ZERO) && 107 (srcA == PIPE_BLENDFACTOR_ZERO) && 108 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR || 109 dstRGB == PIPE_BLENDFACTOR_ONE) && 110 (dstA == PIPE_BLENDFACTOR_ONE); 111} 112 113static boolean blend_discard_if_src_color_1(unsigned srcRGB, unsigned srcA, 114 unsigned dstRGB, unsigned dstA) 115{ 116 /* If the blend equation is ADD or REVERSE_SUBTRACT, 117 * SRC_COLOR == (1,1,1), and the following state is set, the colorbuffer 118 * will not be changed. 119 * Notice that the dst factors are the src factors inverted. */ 120 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR || 121 srcRGB == PIPE_BLENDFACTOR_ZERO) && 122 (srcA == PIPE_BLENDFACTOR_ZERO) && 123 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR || 124 dstRGB == PIPE_BLENDFACTOR_ONE) && 125 (dstA == PIPE_BLENDFACTOR_ONE); 126} 127 128static boolean blend_discard_if_src_alpha_color_0(unsigned srcRGB, unsigned srcA, 129 unsigned dstRGB, unsigned dstA) 130{ 131 /* If the blend equation is ADD or REVERSE_SUBTRACT, 132 * SRC_ALPHA_COLOR == (0,0,0,0), and the following state is set, 133 * the colorbuffer will not be changed. 134 * Notice that the dst factors are the src factors inverted. */ 135 return (srcRGB == PIPE_BLENDFACTOR_SRC_COLOR || 136 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA || 137 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE || 138 srcRGB == PIPE_BLENDFACTOR_ZERO) && 139 (srcA == PIPE_BLENDFACTOR_SRC_COLOR || 140 srcA == PIPE_BLENDFACTOR_SRC_ALPHA || 141 srcA == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE || 142 srcA == PIPE_BLENDFACTOR_ZERO) && 143 (dstRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR || 144 dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 145 dstRGB == PIPE_BLENDFACTOR_ONE) && 146 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR || 147 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 148 dstA == PIPE_BLENDFACTOR_ONE); 149} 150 151static boolean blend_discard_if_src_alpha_color_1(unsigned srcRGB, unsigned srcA, 152 unsigned dstRGB, unsigned dstA) 153{ 154 /* If the blend equation is ADD or REVERSE_SUBTRACT, 155 * SRC_ALPHA_COLOR == (1,1,1,1), and the following state is set, 156 * the colorbuffer will not be changed. 157 * Notice that the dst factors are the src factors inverted. 
*/ 158 return (srcRGB == PIPE_BLENDFACTOR_INV_SRC_COLOR || 159 srcRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 160 srcRGB == PIPE_BLENDFACTOR_ZERO) && 161 (srcA == PIPE_BLENDFACTOR_INV_SRC_COLOR || 162 srcA == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 163 srcA == PIPE_BLENDFACTOR_ZERO) && 164 (dstRGB == PIPE_BLENDFACTOR_SRC_COLOR || 165 dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA || 166 dstRGB == PIPE_BLENDFACTOR_ONE) && 167 (dstA == PIPE_BLENDFACTOR_SRC_COLOR || 168 dstA == PIPE_BLENDFACTOR_SRC_ALPHA || 169 dstA == PIPE_BLENDFACTOR_ONE); 170} 171 172static unsigned bgra_cmask(unsigned mask) 173{ 174 /* Gallium uses RGBA color ordering while R300 expects BGRA. */ 175 176 return ((mask & PIPE_MASK_R) << 2) | 177 ((mask & PIPE_MASK_B) >> 2) | 178 (mask & (PIPE_MASK_G | PIPE_MASK_A)); 179} 180 181/* Create a new blend state based on the CSO blend state. 182 * 183 * This encompasses alpha blending, logic/raster ops, and blend dithering. */ 184static void* r300_create_blend_state(struct pipe_context* pipe, 185 const struct pipe_blend_state* state) 186{ 187 struct r300_screen* r300screen = r300_screen(pipe->screen); 188 struct r300_blend_state* blend = CALLOC_STRUCT(r300_blend_state); 189 uint32_t blend_control = 0; /* R300_RB3D_CBLEND: 0x4e04 */ 190 uint32_t alpha_blend_control = 0; /* R300_RB3D_ABLEND: 0x4e08 */ 191 uint32_t color_channel_mask = 0; /* R300_RB3D_COLOR_CHANNEL_MASK: 0x4e0c */ 192 uint32_t rop = 0; /* R300_RB3D_ROPCNTL: 0x4e18 */ 193 uint32_t dither = 0; /* R300_RB3D_DITHER_CTL: 0x4e50 */ 194 CB_LOCALS; 195 196 if (state->rt[0].blend_enable) 197 { 198 unsigned eqRGB = state->rt[0].rgb_func; 199 unsigned srcRGB = state->rt[0].rgb_src_factor; 200 unsigned dstRGB = state->rt[0].rgb_dst_factor; 201 202 unsigned eqA = state->rt[0].alpha_func; 203 unsigned srcA = state->rt[0].alpha_src_factor; 204 unsigned dstA = state->rt[0].alpha_dst_factor; 205 206 /* despite the name, ALPHA_BLEND_ENABLE has nothing to do with alpha, 207 * this is just the crappy D3D naming */ 208 blend_control = R300_ALPHA_BLEND_ENABLE | 209 r300_translate_blend_function(eqRGB) | 210 ( r300_translate_blend_factor(srcRGB) << R300_SRC_BLEND_SHIFT) | 211 ( r300_translate_blend_factor(dstRGB) << R300_DST_BLEND_SHIFT); 212 213 /* Optimization: some operations do not require the destination color. 214 * 215 * When SRC_ALPHA_SATURATE is used, colorbuffer reads must be enabled, 216 * otherwise blending gives incorrect results. It seems to be 217 * a hardware bug. */ 218 if (eqRGB == PIPE_BLEND_MIN || eqA == PIPE_BLEND_MIN || 219 eqRGB == PIPE_BLEND_MAX || eqA == PIPE_BLEND_MAX || 220 dstRGB != PIPE_BLENDFACTOR_ZERO || 221 dstA != PIPE_BLENDFACTOR_ZERO || 222 srcRGB == PIPE_BLENDFACTOR_DST_COLOR || 223 srcRGB == PIPE_BLENDFACTOR_DST_ALPHA || 224 srcRGB == PIPE_BLENDFACTOR_INV_DST_COLOR || 225 srcRGB == PIPE_BLENDFACTOR_INV_DST_ALPHA || 226 srcA == PIPE_BLENDFACTOR_DST_COLOR || 227 srcA == PIPE_BLENDFACTOR_DST_ALPHA || 228 srcA == PIPE_BLENDFACTOR_INV_DST_COLOR || 229 srcA == PIPE_BLENDFACTOR_INV_DST_ALPHA || 230 srcRGB == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE) { 231 /* Enable reading from the colorbuffer. */ 232 blend_control |= R300_READ_ENABLE; 233 234 if (r300screen->caps.is_r500) { 235 /* Optimization: Depending on incoming pixels, we can 236 * conditionally disable the reading in hardware... */ 237 if (eqRGB != PIPE_BLEND_MIN && eqA != PIPE_BLEND_MIN && 238 eqRGB != PIPE_BLEND_MAX && eqA != PIPE_BLEND_MAX) { 239 /* Disable reading if SRC_ALPHA == 0. 
*/ 240 if ((dstRGB == PIPE_BLENDFACTOR_SRC_ALPHA || 241 dstRGB == PIPE_BLENDFACTOR_ZERO) && 242 (dstA == PIPE_BLENDFACTOR_SRC_COLOR || 243 dstA == PIPE_BLENDFACTOR_SRC_ALPHA || 244 dstA == PIPE_BLENDFACTOR_ZERO)) { 245 blend_control |= R500_SRC_ALPHA_0_NO_READ; 246 } 247 248 /* Disable reading if SRC_ALPHA == 1. */ 249 if ((dstRGB == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 250 dstRGB == PIPE_BLENDFACTOR_ZERO) && 251 (dstA == PIPE_BLENDFACTOR_INV_SRC_COLOR || 252 dstA == PIPE_BLENDFACTOR_INV_SRC_ALPHA || 253 dstA == PIPE_BLENDFACTOR_ZERO)) { 254 blend_control |= R500_SRC_ALPHA_1_NO_READ; 255 } 256 } 257 } 258 } 259 260 /* Optimization: discard pixels which don't change the colorbuffer. 261 * 262 * The code below is non-trivial and some math is involved. 263 * 264 * Discarding pixels must be disabled when FP16 AA is enabled. 265 * This is a hardware bug. Also, this implementation wouldn't work 266 * with FP blending enabled and equation clamping disabled. 267 * 268 * Equations other than ADD are rarely used and therefore won't be 269 * optimized. */ 270 if ((eqRGB == PIPE_BLEND_ADD || eqRGB == PIPE_BLEND_REVERSE_SUBTRACT) && 271 (eqA == PIPE_BLEND_ADD || eqA == PIPE_BLEND_REVERSE_SUBTRACT)) { 272 /* ADD: X+Y 273 * REVERSE_SUBTRACT: Y-X 274 * 275 * The idea is: 276 * If X = src*srcFactor = 0 and Y = dst*dstFactor = 1, 277 * then CB will not be changed. 278 * 279 * Given the srcFactor and dstFactor variables, we can derive 280 * what src and dst should be equal to and discard appropriate 281 * pixels. 282 */ 283 if (blend_discard_if_src_alpha_0(srcRGB, srcA, dstRGB, dstA)) { 284 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_0; 285 } else if (blend_discard_if_src_alpha_1(srcRGB, srcA, 286 dstRGB, dstA)) { 287 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_ALPHA_1; 288 } else if (blend_discard_if_src_color_0(srcRGB, srcA, 289 dstRGB, dstA)) { 290 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_0; 291 } else if (blend_discard_if_src_color_1(srcRGB, srcA, 292 dstRGB, dstA)) { 293 blend_control |= R300_DISCARD_SRC_PIXELS_SRC_COLOR_1; 294 } else if (blend_discard_if_src_alpha_color_0(srcRGB, srcA, 295 dstRGB, dstA)) { 296 blend_control |= 297 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_0; 298 } else if (blend_discard_if_src_alpha_color_1(srcRGB, srcA, 299 dstRGB, dstA)) { 300 blend_control |= 301 R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_1; 302 } 303 } 304 305 /* separate alpha */ 306 if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) { 307 blend_control |= R300_SEPARATE_ALPHA_ENABLE; 308 alpha_blend_control = 309 r300_translate_blend_function(eqA) | 310 (r300_translate_blend_factor(srcA) << R300_SRC_BLEND_SHIFT) | 311 (r300_translate_blend_factor(dstA) << R300_DST_BLEND_SHIFT); 312 } 313 } 314 315 /* PIPE_LOGICOP_* don't need to be translated, fortunately. */ 316 if (state->logicop_enable) { 317 rop = R300_RB3D_ROPCNTL_ROP_ENABLE | 318 (state->logicop_func) << R300_RB3D_ROPCNTL_ROP_SHIFT; 319 } 320 321 /* Color channel masks for all MRTs. */ 322 color_channel_mask = bgra_cmask(state->rt[0].colormask); 323 if (r300screen->caps.is_r500 && state->independent_blend_enable) { 324 if (state->rt[1].blend_enable) { 325 color_channel_mask |= bgra_cmask(state->rt[1].colormask) << 4; 326 } 327 if (state->rt[2].blend_enable) { 328 color_channel_mask |= bgra_cmask(state->rt[2].colormask) << 8; 329 } 330 if (state->rt[3].blend_enable) { 331 color_channel_mask |= bgra_cmask(state->rt[3].colormask) << 12; 332 } 333 } 334 335 /* Neither fglrx nor classic r300 ever set this, regardless of dithering 336 * state. 
Since it's an optional implementation detail, we can leave it 337 * out and never dither. 338 * 339 * This could be revisited if we ever get quality or conformance hints. 340 * 341 if (state->dither) { 342 dither = R300_RB3D_DITHER_CTL_DITHER_MODE_LUT | 343 R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_LUT; 344 } 345 */ 346 347 /* Build a command buffer. */ 348 BEGIN_CB(blend->cb, 8); 349 OUT_CB_REG(R300_RB3D_ROPCNTL, rop); 350 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3); 351 OUT_CB(blend_control); 352 OUT_CB(alpha_blend_control); 353 OUT_CB(color_channel_mask); 354 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither); 355 END_CB; 356 357 /* The same as above, but with no colorbuffer reads and writes. */ 358 BEGIN_CB(blend->cb_no_readwrite, 8); 359 OUT_CB_REG(R300_RB3D_ROPCNTL, rop); 360 OUT_CB_REG_SEQ(R300_RB3D_CBLEND, 3); 361 OUT_CB(0); 362 OUT_CB(0); 363 OUT_CB(0); 364 OUT_CB_REG(R300_RB3D_DITHER_CTL, dither); 365 END_CB; 366 367 return (void*)blend; 368} 369 370/* Bind blend state. */ 371static void r300_bind_blend_state(struct pipe_context* pipe, 372 void* state) 373{ 374 struct r300_context* r300 = r300_context(pipe); 375 376 UPDATE_STATE(state, r300->blend_state); 377} 378 379/* Free blend state. */ 380static void r300_delete_blend_state(struct pipe_context* pipe, 381 void* state) 382{ 383 FREE(state); 384} 385 386/* Convert float to 10bit integer */ 387static unsigned float_to_fixed10(float f) 388{ 389 return CLAMP((unsigned)(f * 1023.9f), 0, 1023); 390} 391 392/* Set blend color. 393 * Setup both R300 and R500 registers, figure out later which one to write. */ 394static void r300_set_blend_color(struct pipe_context* pipe, 395 const struct pipe_blend_color* color) 396{ 397 struct r300_context* r300 = r300_context(pipe); 398 struct r300_blend_color_state* state = 399 (struct r300_blend_color_state*)r300->blend_color_state.state; 400 CB_LOCALS; 401 402 if (r300->screen->caps.is_r500) { 403 /* XXX if FP16 blending is enabled, we should use the FP16 format */ 404 BEGIN_CB(state->cb, 3); 405 OUT_CB_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR, 2); 406 OUT_CB(float_to_fixed10(color->color[0]) | 407 (float_to_fixed10(color->color[3]) << 16)); 408 OUT_CB(float_to_fixed10(color->color[2]) | 409 (float_to_fixed10(color->color[1]) << 16)); 410 END_CB; 411 } else { 412 union util_color uc; 413 util_pack_color(color->color, PIPE_FORMAT_B8G8R8A8_UNORM, &uc); 414 415 BEGIN_CB(state->cb, 2); 416 OUT_CB_REG(R300_RB3D_BLEND_COLOR, uc.ui); 417 END_CB; 418 } 419 420 r300_mark_atom_dirty(r300, &r300->blend_color_state); 421} 422 423static void r300_set_clip_state(struct pipe_context* pipe, 424 const struct pipe_clip_state* state) 425{ 426 struct r300_context* r300 = r300_context(pipe); 427 struct r300_clip_state *clip = 428 (struct r300_clip_state*)r300->clip_state.state; 429 CB_LOCALS; 430 431 clip->clip = *state; 432 433 if (r300->screen->caps.has_tcl) { 434 r300->clip_state.size = 2 + !!state->nr * 3 + state->nr * 4; 435 436 BEGIN_CB(clip->cb, r300->clip_state.size); 437 if (state->nr) { 438 OUT_CB_REG(R300_VAP_PVS_VECTOR_INDX_REG, 439 (r300->screen->caps.is_r500 ? 440 R500_PVS_UCP_START : R300_PVS_UCP_START)); 441 OUT_CB_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, state->nr * 4); 442 OUT_CB_TABLE(state->ucp, state->nr * 4); 443 } 444 OUT_CB_REG(R300_VAP_CLIP_CNTL, ((1 << state->nr) - 1) | 445 R300_PS_UCP_MODE_CLIP_AS_TRIFAN | 446 (state->depth_clamp ? 
R300_CLIP_DISABLE : 0)); 447 END_CB; 448 449 r300_mark_atom_dirty(r300, &r300->clip_state); 450 } else { 451 draw_set_clip_state(r300->draw, state); 452 } 453} 454 455static void 456r300_set_sample_mask(struct pipe_context *pipe, 457 unsigned sample_mask) 458{ 459} 460 461 462/* Create a new depth, stencil, and alpha state based on the CSO dsa state. 463 * 464 * This contains the depth buffer, stencil buffer, alpha test, and such. 465 * On the Radeon, depth and stencil buffer setup are intertwined, which is 466 * the reason for some of the strange-looking assignments across registers. */ 467static void* 468 r300_create_dsa_state(struct pipe_context* pipe, 469 const struct pipe_depth_stencil_alpha_state* state) 470{ 471 struct r300_capabilities *caps = &r300_screen(pipe->screen)->caps; 472 struct r300_dsa_state* dsa = CALLOC_STRUCT(r300_dsa_state); 473 CB_LOCALS; 474 475 dsa->dsa = *state; 476 477 /* Depth test setup. - separate write mask depth for decomp flush */ 478 if (state->depth.writemask) { 479 dsa->z_buffer_control |= R300_Z_WRITE_ENABLE; 480 } 481 482 if (state->depth.enabled) { 483 dsa->z_buffer_control |= R300_Z_ENABLE; 484 485 dsa->z_stencil_control |= 486 (r300_translate_depth_stencil_function(state->depth.func) << 487 R300_Z_FUNC_SHIFT); 488 } 489 490 /* Stencil buffer setup. */ 491 if (state->stencil[0].enabled) { 492 dsa->z_buffer_control |= R300_STENCIL_ENABLE; 493 dsa->z_stencil_control |= 494 (r300_translate_depth_stencil_function(state->stencil[0].func) << 495 R300_S_FRONT_FUNC_SHIFT) | 496 (r300_translate_stencil_op(state->stencil[0].fail_op) << 497 R300_S_FRONT_SFAIL_OP_SHIFT) | 498 (r300_translate_stencil_op(state->stencil[0].zpass_op) << 499 R300_S_FRONT_ZPASS_OP_SHIFT) | 500 (r300_translate_stencil_op(state->stencil[0].zfail_op) << 501 R300_S_FRONT_ZFAIL_OP_SHIFT); 502 503 dsa->stencil_ref_mask = 504 (state->stencil[0].valuemask << R300_STENCILMASK_SHIFT) | 505 (state->stencil[0].writemask << R300_STENCILWRITEMASK_SHIFT); 506 507 if (state->stencil[1].enabled) { 508 dsa->two_sided = TRUE; 509 510 dsa->z_buffer_control |= R300_STENCIL_FRONT_BACK; 511 dsa->z_stencil_control |= 512 (r300_translate_depth_stencil_function(state->stencil[1].func) << 513 R300_S_BACK_FUNC_SHIFT) | 514 (r300_translate_stencil_op(state->stencil[1].fail_op) << 515 R300_S_BACK_SFAIL_OP_SHIFT) | 516 (r300_translate_stencil_op(state->stencil[1].zpass_op) << 517 R300_S_BACK_ZPASS_OP_SHIFT) | 518 (r300_translate_stencil_op(state->stencil[1].zfail_op) << 519 R300_S_BACK_ZFAIL_OP_SHIFT); 520 521 dsa->stencil_ref_bf = 522 (state->stencil[1].valuemask << R300_STENCILMASK_SHIFT) | 523 (state->stencil[1].writemask << R300_STENCILWRITEMASK_SHIFT); 524 525 if (caps->is_r500) { 526 dsa->z_buffer_control |= R500_STENCIL_REFMASK_FRONT_BACK; 527 } else { 528 dsa->two_sided_stencil_ref = 529 (state->stencil[0].valuemask != state->stencil[1].valuemask || 530 state->stencil[0].writemask != state->stencil[1].writemask); 531 } 532 } 533 } 534 535 /* Alpha test setup. */ 536 if (state->alpha.enabled) { 537 dsa->alpha_function = 538 r300_translate_alpha_function(state->alpha.func) | 539 R300_FG_ALPHA_FUNC_ENABLE; 540 541 /* We could use 10bit alpha ref but who needs that? 
*/ 542 dsa->alpha_function |= float_to_ubyte(state->alpha.ref_value); 543 544 if (caps->is_r500) 545 dsa->alpha_function |= R500_FG_ALPHA_FUNC_8BIT; 546 } 547 548 BEGIN_CB(&dsa->cb_begin, 8); 549 OUT_CB_REG(R300_FG_ALPHA_FUNC, dsa->alpha_function); 550 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3); 551 OUT_CB(dsa->z_buffer_control); 552 OUT_CB(dsa->z_stencil_control); 553 OUT_CB(dsa->stencil_ref_mask); 554 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, dsa->stencil_ref_bf); 555 END_CB; 556 557 BEGIN_CB(dsa->cb_no_readwrite, 8); 558 OUT_CB_REG(R300_FG_ALPHA_FUNC, dsa->alpha_function); 559 OUT_CB_REG_SEQ(R300_ZB_CNTL, 3); 560 OUT_CB(0); 561 OUT_CB(0); 562 OUT_CB(0); 563 OUT_CB_REG(R500_ZB_STENCILREFMASK_BF, 0); 564 END_CB; 565 566 return (void*)dsa; 567} 568 569static void r300_dsa_inject_stencilref(struct r300_context *r300) 570{ 571 struct r300_dsa_state *dsa = 572 (struct r300_dsa_state*)r300->dsa_state.state; 573 574 if (!dsa) 575 return; 576 577 dsa->stencil_ref_mask = 578 (dsa->stencil_ref_mask & ~R300_STENCILREF_MASK) | 579 r300->stencil_ref.ref_value[0]; 580 dsa->stencil_ref_bf = 581 (dsa->stencil_ref_bf & ~R300_STENCILREF_MASK) | 582 r300->stencil_ref.ref_value[1]; 583} 584 585/* Bind DSA state. */ 586static void r300_bind_dsa_state(struct pipe_context* pipe, 587 void* state) 588{ 589 struct r300_context* r300 = r300_context(pipe); 590 591 if (!state) { 592 return; 593 } 594 595 UPDATE_STATE(state, r300->dsa_state); 596 597 r300_mark_atom_dirty(r300, &r300->hyperz_state); /* Will be updated before the emission. */ 598 r300_dsa_inject_stencilref(r300); 599} 600 601/* Free DSA state. */ 602static void r300_delete_dsa_state(struct pipe_context* pipe, 603 void* state) 604{ 605 FREE(state); 606} 607 608static void r300_set_stencil_ref(struct pipe_context* pipe, 609 const struct pipe_stencil_ref* sr) 610{ 611 struct r300_context* r300 = r300_context(pipe); 612 613 r300->stencil_ref = *sr; 614 615 r300_dsa_inject_stencilref(r300); 616 r300_mark_atom_dirty(r300, &r300->dsa_state); 617} 618 619static void r300_tex_set_tiling_flags(struct r300_context *r300, 620 struct r300_texture *tex, unsigned level) 621{ 622 /* Check if the macrotile flag needs to be changed. 623 * Skip changing the flags otherwise. */ 624 if (tex->desc.macrotile[tex->surface_level] != 625 tex->desc.macrotile[level]) { 626 /* Tiling determines how DRM treats the buffer data. 627 * We must flush CS when changing it if the buffer is referenced. */ 628 if (r300->rws->cs_is_buffer_referenced(r300->cs, 629 tex->cs_buffer, R300_REF_CS)) 630 r300->context.flush(&r300->context, 0, NULL); 631 632 r300->rws->buffer_set_tiling(r300->rws, tex->buffer, 633 tex->desc.microtile, tex->desc.macrotile[level], 634 tex->desc.stride_in_bytes[0]); 635 636 tex->surface_level = level; 637 } 638} 639 640/* This switcheroo is needed just because of goddamned MACRO_SWITCH. */ 641static void r300_fb_set_tiling_flags(struct r300_context *r300, 642 const struct pipe_framebuffer_state *state) 643{ 644 unsigned i; 645 646 /* Set tiling flags for new surfaces. 
*/ 647 for (i = 0; i < state->nr_cbufs; i++) { 648 r300_tex_set_tiling_flags(r300, 649 r300_texture(state->cbufs[i]->texture), 650 state->cbufs[i]->u.tex.level); 651 } 652 if (state->zsbuf) { 653 r300_tex_set_tiling_flags(r300, 654 r300_texture(state->zsbuf->texture), 655 state->zsbuf->u.tex.level); 656 } 657} 658 659static void r300_print_fb_surf_info(struct pipe_surface *surf, unsigned index, 660 const char *binding) 661{ 662 struct pipe_resource *tex = surf->texture; 663 struct r300_texture *rtex = r300_texture(tex); 664 665 fprintf(stderr, 666 "r300: %s[%i] Dim: %ix%i, Firstlayer: %i, " 667 "Lastlayer: %i, Level: %i, Format: %s\n" 668 669 "r300: TEX: Macro: %s, Micro: %s, Pitch: %i, " 670 "Dim: %ix%ix%i, LastLevel: %i, Format: %s\n", 671 672 binding, index, surf->width, surf->height, 673 surf->u.tex.first_layer, surf->u.tex.last_layer, surf->u.tex.level, 674 util_format_short_name(surf->format), 675 676 rtex->desc.macrotile[0] ? "YES" : " NO", 677 rtex->desc.microtile ? "YES" : " NO", 678 rtex->desc.stride_in_pixels[0], 679 tex->width0, tex->height0, tex->depth0, 680 tex->last_level, util_format_short_name(tex->format)); 681} 682 683void r300_mark_fb_state_dirty(struct r300_context *r300, 684 enum r300_fb_state_change change) 685{ 686 struct pipe_framebuffer_state *state = r300->fb_state.state; 687 boolean can_hyperz = r300->rws->get_value(r300->rws, R300_CAN_HYPERZ); 688 689 /* What is marked as dirty depends on the enum r300_fb_state_change. */ 690 r300_mark_atom_dirty(r300, &r300->gpu_flush); 691 r300_mark_atom_dirty(r300, &r300->fb_state); 692 r300_mark_atom_dirty(r300, &r300->hyperz_state); 693 694 if (change == R300_CHANGED_FB_STATE) { 695 r300_mark_atom_dirty(r300, &r300->aa_state); 696 r300_mark_atom_dirty(r300, &r300->fb_state_pipelined); 697 } 698 699 /* Now compute the fb_state atom size. */ 700 r300->fb_state.size = 2 + (8 * state->nr_cbufs); 701 702 if (r300->cbzb_clear) 703 r300->fb_state.size += 10; 704 else if (state->zsbuf) { 705 r300->fb_state.size += 10; 706 if (can_hyperz) 707 r300->fb_state.size += r300->screen->caps.hiz_ram ? 8 : 4; 708 } 709 710 /* The size of the rest of atoms stays the same. */ 711} 712 713static void 714 r300_set_framebuffer_state(struct pipe_context* pipe, 715 const struct pipe_framebuffer_state* state) 716{ 717 struct r300_context* r300 = r300_context(pipe); 718 struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state; 719 struct pipe_framebuffer_state *old_state = r300->fb_state.state; 720 boolean can_hyperz = r300->rws->get_value(r300->rws, R300_CAN_HYPERZ); 721 unsigned max_width, max_height, i; 722 uint32_t zbuffer_bpp = 0; 723 int blocksize; 724 725 if (r300->screen->caps.is_r500) { 726 max_width = max_height = 4096; 727 } else if (r300->screen->caps.is_r400) { 728 max_width = max_height = 4021; 729 } else { 730 max_width = max_height = 2560; 731 } 732 733 if (state->width > max_width || state->height > max_height) { 734 fprintf(stderr, "r300: Implementation error: Render targets are too " 735 "big in %s, refusing to bind framebuffer state!\n", __FUNCTION__); 736 return; 737 } 738 739 /* If nr_cbufs is changed from zero to non-zero or vice versa... */ 740 if (!!old_state->nr_cbufs != !!state->nr_cbufs) { 741 r300_mark_atom_dirty(r300, &r300->blend_state); 742 } 743 /* If zsbuf is set from NULL to non-NULL or vice versa.. */ 744 if (!!old_state->zsbuf != !!state->zsbuf) { 745 r300_mark_atom_dirty(r300, &r300->dsa_state); 746 } 747 748 /* The tiling flags are dependent on the surface miplevel, unfortunately. 
*/ 749 r300_fb_set_tiling_flags(r300, state); 750 751 util_copy_framebuffer_state(r300->fb_state.state, state); 752 753 r300_mark_fb_state_dirty(r300, R300_CHANGED_FB_STATE); 754 r300->validate_buffers = TRUE; 755 756 r300->z_compression = false; 757 758 if (state->zsbuf) { 759 blocksize = util_format_get_blocksize(state->zsbuf->texture->format); 760 switch (blocksize) { 761 case 2: 762 zbuffer_bpp = 16; 763 break; 764 case 4: 765 zbuffer_bpp = 24; 766 break; 767 } 768 if (can_hyperz) { 769 struct r300_surface *zs_surf = r300_surface(state->zsbuf); 770 struct r300_texture *tex; 771 int compress = r300->screen->caps.is_rv350 ? RV350_Z_COMPRESS_88 : R300_Z_COMPRESS_44; 772 int level = zs_surf->base.u.tex.level; 773 774 tex = r300_texture(zs_surf->base.texture); 775 776 /* work out whether we can support hiz on this buffer */ 777 r300_hiz_alloc_block(r300, zs_surf); 778 779 /* work out whether we can support zmask features on this buffer */ 780 r300_zmask_alloc_block(r300, zs_surf, compress); 781 782 if (tex->zmask_mem[level]) { 783 /* compression causes hangs on 16-bit */ 784 if (zbuffer_bpp == 24) 785 r300->z_compression = compress; 786 } 787 DBG(r300, DBG_HYPERZ, 788 "hyper-z features: hiz: %d @ %08x z-compression: %d z-fastfill: %d @ %08x\n", tex->hiz_mem[level] ? 1 : 0, 789 tex->hiz_mem[level] ? tex->hiz_mem[level]->ofs : 0xdeadbeef, 790 r300->z_compression, tex->zmask_mem[level] ? 1 : 0, 791 tex->zmask_mem[level] ? tex->zmask_mem[level]->ofs : 0xdeadbeef); 792 } 793 794 /* Polygon offset depends on the zbuffer bit depth. */ 795 if (r300->zbuffer_bpp != zbuffer_bpp) { 796 r300->zbuffer_bpp = zbuffer_bpp; 797 798 if (r300->polygon_offset_enabled) 799 r300_mark_atom_dirty(r300, &r300->rs_state); 800 } 801 } 802 803 /* Set up AA config. */ 804 if (r300->rws->get_value(r300->rws, R300_VID_DRM_2_3_0)) { 805 if (state->nr_cbufs && state->cbufs[0]->texture->nr_samples > 1) { 806 aa->aa_config = R300_GB_AA_CONFIG_AA_ENABLE; 807 808 switch (state->cbufs[0]->texture->nr_samples) { 809 case 2: 810 aa->aa_config |= R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_2; 811 break; 812 case 3: 813 aa->aa_config |= R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_3; 814 break; 815 case 4: 816 aa->aa_config |= R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_4; 817 break; 818 case 6: 819 aa->aa_config |= R300_GB_AA_CONFIG_NUM_AA_SUBSAMPLES_6; 820 break; 821 } 822 } else { 823 aa->aa_config = 0; 824 } 825 } 826 827 if (DBG_ON(r300, DBG_FB)) { 828 fprintf(stderr, "r300: set_framebuffer_state:\n"); 829 for (i = 0; i < state->nr_cbufs; i++) { 830 r300_print_fb_surf_info(state->cbufs[i], i, "CB"); 831 } 832 if (state->zsbuf) { 833 r300_print_fb_surf_info(state->zsbuf, 0, "ZB"); 834 } 835 } 836} 837 838/* Create fragment shader state. */ 839static void* r300_create_fs_state(struct pipe_context* pipe, 840 const struct pipe_shader_state* shader) 841{ 842 struct r300_fragment_shader* fs = NULL; 843 844 fs = (struct r300_fragment_shader*)CALLOC_STRUCT(r300_fragment_shader); 845 846 /* Copy state directly into shader. 
*/ 847 fs->state = *shader; 848 fs->state.tokens = tgsi_dup_tokens(shader->tokens); 849 850 return (void*)fs; 851} 852 853void r300_mark_fs_code_dirty(struct r300_context *r300) 854{ 855 struct r300_fragment_shader* fs = r300_fs(r300); 856 857 r300_mark_atom_dirty(r300, &r300->fs); 858 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state); 859 r300_mark_atom_dirty(r300, &r300->fs_constants); 860 r300->fs.size = fs->shader->cb_code_size; 861 862 if (r300->screen->caps.is_r500) { 863 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 7; 864 r300->fs_constants.size = fs->shader->externals_count * 4 + 3; 865 } else { 866 r300->fs_rc_constant_state.size = fs->shader->rc_state_count * 5; 867 r300->fs_constants.size = fs->shader->externals_count * 4 + 1; 868 } 869 870 ((struct r300_constant_buffer*)r300->fs_constants.state)->remap_table = 871 fs->shader->code.constants_remap_table; 872} 873 874/* Bind fragment shader state. */ 875static void r300_bind_fs_state(struct pipe_context* pipe, void* shader) 876{ 877 struct r300_context* r300 = r300_context(pipe); 878 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader; 879 880 if (fs == NULL) { 881 r300->fs.state = NULL; 882 return; 883 } 884 885 r300->fs.state = fs; 886 r300_pick_fragment_shader(r300); 887 r300_mark_fs_code_dirty(r300); 888 889 r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before the emission. */ 890} 891 892/* Delete fragment shader state. */ 893static void r300_delete_fs_state(struct pipe_context* pipe, void* shader) 894{ 895 struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader; 896 struct r300_fragment_shader_code *tmp, *ptr = fs->first; 897 898 while (ptr) { 899 tmp = ptr; 900 ptr = ptr->next; 901 rc_constants_destroy(&tmp->code.constants); 902 FREE(tmp->cb_code); 903 FREE(tmp); 904 } 905 FREE((void*)fs->state.tokens); 906 FREE(shader); 907} 908 909static void r300_set_polygon_stipple(struct pipe_context* pipe, 910 const struct pipe_poly_stipple* state) 911{ 912 /* XXX no idea how to set this up, but not terribly important */ 913} 914 915/* Create a new rasterizer state based on the CSO rasterizer state. 916 * 917 * This is a very large chunk of state, and covers most of the graphics 918 * backend (GB), geometry assembly (GA), and setup unit (SU) blocks. 919 * 920 * In a not entirely unironic sidenote, this state has nearly nothing to do 921 * with the actual block on the Radeon called the rasterizer (RS). 
*/ 922static void* r300_create_rs_state(struct pipe_context* pipe, 923 const struct pipe_rasterizer_state* state) 924{ 925 struct r300_rs_state* rs = CALLOC_STRUCT(r300_rs_state); 926 float psiz; 927 uint32_t vap_control_status; /* R300_VAP_CNTL_STATUS: 0x2140 */ 928 uint32_t point_size; /* R300_GA_POINT_SIZE: 0x421c */ 929 uint32_t point_minmax; /* R300_GA_POINT_MINMAX: 0x4230 */ 930 uint32_t line_control; /* R300_GA_LINE_CNTL: 0x4234 */ 931 uint32_t polygon_offset_enable; /* R300_SU_POLY_OFFSET_ENABLE: 0x42b4 */ 932 uint32_t cull_mode; /* R300_SU_CULL_MODE: 0x42b8 */ 933 uint32_t line_stipple_config; /* R300_GA_LINE_STIPPLE_CONFIG: 0x4328 */ 934 uint32_t line_stipple_value; /* R300_GA_LINE_STIPPLE_VALUE: 0x4260 */ 935 uint32_t polygon_mode; /* R300_GA_POLY_MODE: 0x4288 */ 936 uint32_t clip_rule; /* R300_SC_CLIP_RULE: 0x43D0 */ 937 938 /* Point sprites texture coordinates, 0: lower left, 1: upper right */ 939 float point_texcoord_left = 0; /* R300_GA_POINT_S0: 0x4200 */ 940 float point_texcoord_bottom = 0;/* R300_GA_POINT_T0: 0x4204 */ 941 float point_texcoord_right = 1; /* R300_GA_POINT_S1: 0x4208 */ 942 float point_texcoord_top = 0; /* R300_GA_POINT_T1: 0x420c */ 943 CB_LOCALS; 944 945 /* Copy rasterizer state. */ 946 rs->rs = *state; 947 rs->rs_draw = *state; 948 949 rs->rs.sprite_coord_enable = state->point_quad_rasterization * 950 state->sprite_coord_enable; 951 952 /* Override some states for Draw. */ 953 rs->rs_draw.sprite_coord_enable = 0; /* We can do this in HW. */ 954 955#ifdef PIPE_ARCH_LITTLE_ENDIAN 956 vap_control_status = R300_VC_NO_SWAP; 957#else 958 vap_control_status = R300_VC_32BIT_SWAP; 959#endif 960 961 /* If no TCL engine is present, turn off the HW TCL. */ 962 if (!r300_screen(pipe->screen)->caps.has_tcl) { 963 vap_control_status |= R300_VAP_TCL_BYPASS; 964 } 965 966 /* Point size width and height. */ 967 point_size = 968 pack_float_16_6x(state->point_size) | 969 (pack_float_16_6x(state->point_size) << R300_POINTSIZE_X_SHIFT); 970 971 /* Point size clamping. */ 972 if (state->point_size_per_vertex) { 973 /* Per-vertex point size. 974 * Clamp to [0, max FB size] */ 975 psiz = pipe->screen->get_paramf(pipe->screen, 976 PIPE_CAP_MAX_POINT_WIDTH); 977 point_minmax = 978 pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT; 979 } else { 980 /* We cannot disable the point-size vertex output, 981 * so clamp it. */ 982 psiz = state->point_size; 983 point_minmax = 984 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MIN_SHIFT) | 985 (pack_float_16_6x(psiz) << R300_GA_POINT_MINMAX_MAX_SHIFT); 986 } 987 988 /* Line control. 
*/ 989 line_control = pack_float_16_6x(state->line_width) | 990 R300_GA_LINE_CNTL_END_TYPE_COMP; 991 992 /* Enable polygon mode */ 993 polygon_mode = 0; 994 if (state->fill_front != PIPE_POLYGON_MODE_FILL || 995 state->fill_back != PIPE_POLYGON_MODE_FILL) { 996 polygon_mode = R300_GA_POLY_MODE_DUAL; 997 } 998 999 /* Front face */ 1000 if (state->front_ccw) 1001 cull_mode = R300_FRONT_FACE_CCW; 1002 else 1003 cull_mode = R300_FRONT_FACE_CW; 1004 1005 /* Polygon offset */ 1006 polygon_offset_enable = 0; 1007 if (util_get_offset(state, state->fill_front)) { 1008 polygon_offset_enable |= R300_FRONT_ENABLE; 1009 } 1010 if (util_get_offset(state, state->fill_back)) { 1011 polygon_offset_enable |= R300_BACK_ENABLE; 1012 } 1013 1014 rs->polygon_offset_enable = polygon_offset_enable != 0; 1015 1016 /* Polygon mode */ 1017 if (polygon_mode) { 1018 polygon_mode |= 1019 r300_translate_polygon_mode_front(state->fill_front); 1020 polygon_mode |= 1021 r300_translate_polygon_mode_back(state->fill_back); 1022 } 1023 1024 if (state->cull_face & PIPE_FACE_FRONT) { 1025 cull_mode |= R300_CULL_FRONT; 1026 } 1027 if (state->cull_face & PIPE_FACE_BACK) { 1028 cull_mode |= R300_CULL_BACK; 1029 } 1030 1031 if (state->line_stipple_enable) { 1032 line_stipple_config = 1033 R300_GA_LINE_STIPPLE_CONFIG_LINE_RESET_LINE | 1034 (fui((float)state->line_stipple_factor) & 1035 R300_GA_LINE_STIPPLE_CONFIG_STIPPLE_SCALE_MASK); 1036 /* XXX this might need to be scaled up */ 1037 line_stipple_value = state->line_stipple_pattern; 1038 } else { 1039 line_stipple_config = 0; 1040 line_stipple_value = 0; 1041 } 1042 1043 if (state->flatshade) { 1044 rs->color_control = R300_SHADE_MODEL_FLAT; 1045 } else { 1046 rs->color_control = R300_SHADE_MODEL_SMOOTH; 1047 } 1048 1049 clip_rule = state->scissor ? 0xAAAA : 0xFFFF; 1050 1051 /* Point sprites coord mode */ 1052 if (rs->rs.sprite_coord_enable) { 1053 switch (state->sprite_coord_mode) { 1054 case PIPE_SPRITE_COORD_UPPER_LEFT: 1055 point_texcoord_top = 0.0f; 1056 point_texcoord_bottom = 1.0f; 1057 break; 1058 case PIPE_SPRITE_COORD_LOWER_LEFT: 1059 point_texcoord_top = 1.0f; 1060 point_texcoord_bottom = 0.0f; 1061 break; 1062 } 1063 } 1064 1065 /* Build the main command buffer. */ 1066 BEGIN_CB(rs->cb_main, RS_STATE_MAIN_SIZE); 1067 OUT_CB_REG(R300_VAP_CNTL_STATUS, vap_control_status); 1068 OUT_CB_REG(R300_GA_POINT_SIZE, point_size); 1069 OUT_CB_REG_SEQ(R300_GA_POINT_MINMAX, 2); 1070 OUT_CB(point_minmax); 1071 OUT_CB(line_control); 1072 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_ENABLE, 2); 1073 OUT_CB(polygon_offset_enable); 1074 rs->cull_mode_index = 9; 1075 OUT_CB(cull_mode); 1076 OUT_CB_REG(R300_GA_LINE_STIPPLE_CONFIG, line_stipple_config); 1077 OUT_CB_REG(R300_GA_LINE_STIPPLE_VALUE, line_stipple_value); 1078 OUT_CB_REG(R300_GA_POLY_MODE, polygon_mode); 1079 OUT_CB_REG(R300_SC_CLIP_RULE, clip_rule); 1080 OUT_CB_REG_SEQ(R300_GA_POINT_S0, 4); 1081 OUT_CB_32F(point_texcoord_left); 1082 OUT_CB_32F(point_texcoord_bottom); 1083 OUT_CB_32F(point_texcoord_right); 1084 OUT_CB_32F(point_texcoord_top); 1085 END_CB; 1086 1087 /* Build the two command buffers for polygon offset setup. 
*/ 1088 if (polygon_offset_enable) { 1089 float scale = state->offset_scale * 12; 1090 float offset = state->offset_units * 4; 1091 1092 BEGIN_CB(rs->cb_poly_offset_zb16, 5); 1093 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4); 1094 OUT_CB_32F(scale); 1095 OUT_CB_32F(offset); 1096 OUT_CB_32F(scale); 1097 OUT_CB_32F(offset); 1098 END_CB; 1099 1100 offset = state->offset_units * 2; 1101 1102 BEGIN_CB(rs->cb_poly_offset_zb24, 5); 1103 OUT_CB_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 4); 1104 OUT_CB_32F(scale); 1105 OUT_CB_32F(offset); 1106 OUT_CB_32F(scale); 1107 OUT_CB_32F(offset); 1108 END_CB; 1109 } 1110 1111 return (void*)rs; 1112} 1113 1114/* Bind rasterizer state. */ 1115static void r300_bind_rs_state(struct pipe_context* pipe, void* state) 1116{ 1117 struct r300_context* r300 = r300_context(pipe); 1118 struct r300_rs_state* rs = (struct r300_rs_state*)state; 1119 int last_sprite_coord_enable = r300->sprite_coord_enable; 1120 boolean last_two_sided_color = r300->two_sided_color; 1121 1122 if (r300->draw && rs) { 1123 draw_set_rasterizer_state(r300->draw, &rs->rs_draw, state); 1124 } 1125 1126 if (rs) { 1127 r300->polygon_offset_enabled = rs->polygon_offset_enable; 1128 r300->sprite_coord_enable = rs->rs.sprite_coord_enable; 1129 r300->two_sided_color = rs->rs.light_twoside; 1130 } else { 1131 r300->polygon_offset_enabled = FALSE; 1132 r300->sprite_coord_enable = 0; 1133 r300->two_sided_color = FALSE; 1134 } 1135 1136 UPDATE_STATE(state, r300->rs_state); 1137 r300->rs_state.size = RS_STATE_MAIN_SIZE + (r300->polygon_offset_enabled ? 5 : 0); 1138 1139 if (last_sprite_coord_enable != r300->sprite_coord_enable || 1140 last_two_sided_color != r300->two_sided_color) { 1141 r300_mark_atom_dirty(r300, &r300->rs_block_state); 1142 } 1143} 1144 1145/* Free rasterizer state. */ 1146static void r300_delete_rs_state(struct pipe_context* pipe, void* state) 1147{ 1148 FREE(state); 1149} 1150 1151static void* 1152 r300_create_sampler_state(struct pipe_context* pipe, 1153 const struct pipe_sampler_state* state) 1154{ 1155 struct r300_context* r300 = r300_context(pipe); 1156 struct r300_sampler_state* sampler = CALLOC_STRUCT(r300_sampler_state); 1157 boolean is_r500 = r300->screen->caps.is_r500; 1158 int lod_bias; 1159 1160 sampler->state = *state; 1161 1162 /* r300 doesn't handle CLAMP and MIRROR_CLAMP correctly when either MAG 1163 * or MIN filter is NEAREST. Since texwrap produces same results 1164 * for CLAMP and CLAMP_TO_EDGE, we use them instead. */ 1165 if (sampler->state.min_img_filter == PIPE_TEX_FILTER_NEAREST || 1166 sampler->state.mag_img_filter == PIPE_TEX_FILTER_NEAREST) { 1167 /* Wrap S. */ 1168 if (sampler->state.wrap_s == PIPE_TEX_WRAP_CLAMP) 1169 sampler->state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE; 1170 else if (sampler->state.wrap_s == PIPE_TEX_WRAP_MIRROR_CLAMP) 1171 sampler->state.wrap_s = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE; 1172 1173 /* Wrap T. */ 1174 if (sampler->state.wrap_t == PIPE_TEX_WRAP_CLAMP) 1175 sampler->state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE; 1176 else if (sampler->state.wrap_t == PIPE_TEX_WRAP_MIRROR_CLAMP) 1177 sampler->state.wrap_t = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE; 1178 1179 /* Wrap R. 
*/ 1180 if (sampler->state.wrap_r == PIPE_TEX_WRAP_CLAMP) 1181 sampler->state.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE; 1182 else if (sampler->state.wrap_r == PIPE_TEX_WRAP_MIRROR_CLAMP) 1183 sampler->state.wrap_r = PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE; 1184 } 1185 1186 sampler->filter0 |= 1187 (r300_translate_wrap(sampler->state.wrap_s) << R300_TX_WRAP_S_SHIFT) | 1188 (r300_translate_wrap(sampler->state.wrap_t) << R300_TX_WRAP_T_SHIFT) | 1189 (r300_translate_wrap(sampler->state.wrap_r) << R300_TX_WRAP_R_SHIFT); 1190 1191 sampler->filter0 |= r300_translate_tex_filters(state->min_img_filter, 1192 state->mag_img_filter, 1193 state->min_mip_filter, 1194 state->max_anisotropy > 0); 1195 1196 sampler->filter0 |= r300_anisotropy(state->max_anisotropy); 1197 1198 /* Unfortunately, r300-r500 don't support floating-point mipmap lods. */ 1199 /* We must pass these to the merge function to clamp them properly. */ 1200 sampler->min_lod = (unsigned)MAX2(state->min_lod, 0); 1201 sampler->max_lod = (unsigned)MAX2(ceilf(state->max_lod), 0); 1202 1203 lod_bias = CLAMP((int)(state->lod_bias * 32 + 1), -(1 << 9), (1 << 9) - 1); 1204 1205 sampler->filter1 |= (lod_bias << R300_LOD_BIAS_SHIFT) & R300_LOD_BIAS_MASK; 1206 1207 /* This is very high quality anisotropic filtering for R5xx. 1208 * It's good for benchmarking the performance of texturing but 1209 * in practice we don't want to slow down the driver because it's 1210 * a pretty good performance killer. Feel free to play with it. */ 1211 if (DBG_ON(r300, DBG_ANISOHQ) && is_r500) { 1212 sampler->filter1 |= r500_anisotropy(state->max_anisotropy); 1213 } 1214 1215 /* R500-specific fixups and optimizations */ 1216 if (r300->screen->caps.is_r500) { 1217 sampler->filter1 |= R500_BORDER_FIX; 1218 } 1219 1220 return (void*)sampler; 1221} 1222 1223static void r300_bind_sampler_states(struct pipe_context* pipe, 1224 unsigned count, 1225 void** states) 1226{ 1227 struct r300_context* r300 = r300_context(pipe); 1228 struct r300_textures_state* state = 1229 (struct r300_textures_state*)r300->textures_state.state; 1230 unsigned tex_units = r300->screen->caps.num_tex_units; 1231 1232 if (count > tex_units) { 1233 return; 1234 } 1235 1236 memcpy(state->sampler_states, states, sizeof(void*) * count); 1237 state->sampler_state_count = count; 1238 1239 r300_mark_atom_dirty(r300, &r300->textures_state); 1240} 1241 1242static void r300_lacks_vertex_textures(struct pipe_context* pipe, 1243 unsigned count, 1244 void** states) 1245{ 1246} 1247 1248static void r300_delete_sampler_state(struct pipe_context* pipe, void* state) 1249{ 1250 FREE(state); 1251} 1252 1253static uint32_t r300_assign_texture_cache_region(unsigned index, unsigned num) 1254{ 1255 /* This looks like a hack, but I believe it's suppose to work like 1256 * that. To illustrate how this works, let's assume you have 5 textures. 1257 * From docs, 5 and the successive numbers are: 1258 * 1259 * FOURTH_1 = 5 1260 * FOURTH_2 = 6 1261 * FOURTH_3 = 7 1262 * EIGHTH_0 = 8 1263 * EIGHTH_1 = 9 1264 * 1265 * First 3 textures will get 3/4 of size of the cache, divived evenly 1266 * between them. The last 1/4 of the cache must be divided between 1267 * the last 2 textures, each will therefore get 1/8 of the cache. 1268 * Why not just to use "5 + texture_index" ? 1269 * 1270 * This simple trick works for all "num" <= 16. 
1271 */ 1272 if (num <= 1) 1273 return R300_TX_CACHE(R300_TX_CACHE_WHOLE); 1274 else 1275 return R300_TX_CACHE(num + index); 1276} 1277 1278static void r300_set_fragment_sampler_views(struct pipe_context* pipe, 1279 unsigned count, 1280 struct pipe_sampler_view** views) 1281{ 1282 struct r300_context* r300 = r300_context(pipe); 1283 struct r300_textures_state* state = 1284 (struct r300_textures_state*)r300->textures_state.state; 1285 struct r300_texture *texture; 1286 unsigned i, real_num_views = 0, view_index = 0; 1287 unsigned tex_units = r300->screen->caps.num_tex_units; 1288 boolean dirty_tex = FALSE; 1289 1290 if (count > tex_units) { 1291 return; 1292 } 1293 1294 /* Calculate the real number of views. */ 1295 for (i = 0; i < count; i++) { 1296 if (views[i]) 1297 real_num_views++; 1298 } 1299 1300 for (i = 0; i < count; i++) { 1301 pipe_sampler_view_reference( 1302 (struct pipe_sampler_view**)&state->sampler_views[i], 1303 views[i]); 1304 1305 if (!views[i]) { 1306 continue; 1307 } 1308 1309 /* A new sampler view (= texture)... */ 1310 dirty_tex = TRUE; 1311 1312 /* Set the texrect factor in the fragment shader. 1313 * Needed for RECT and NPOT fallback. */ 1314 texture = r300_texture(views[i]->texture); 1315 if (texture->desc.is_npot) { 1316 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state); 1317 } 1318 1319 state->sampler_views[i]->texcache_region = 1320 r300_assign_texture_cache_region(view_index, real_num_views); 1321 view_index++; 1322 } 1323 1324 for (i = count; i < tex_units; i++) { 1325 if (state->sampler_views[i]) { 1326 pipe_sampler_view_reference( 1327 (struct pipe_sampler_view**)&state->sampler_views[i], 1328 NULL); 1329 } 1330 } 1331 1332 state->sampler_view_count = count; 1333 1334 r300_mark_atom_dirty(r300, &r300->textures_state); 1335 r300->validate_buffers = TRUE; 1336 1337 if (dirty_tex) { 1338 r300_mark_atom_dirty(r300, &r300->texture_cache_inval); 1339 } 1340} 1341 1342static struct pipe_sampler_view * 1343r300_create_sampler_view(struct pipe_context *pipe, 1344 struct pipe_resource *texture, 1345 const struct pipe_sampler_view *templ) 1346{ 1347 struct r300_sampler_view *view = CALLOC_STRUCT(r300_sampler_view); 1348 struct r300_texture *tex = r300_texture(texture); 1349 boolean is_r500 = r300_screen(pipe->screen)->caps.is_r500; 1350 boolean dxtc_swizzle = r300_screen(pipe->screen)->caps.dxtc_swizzle; 1351 1352 if (view) { 1353 view->base = *templ; 1354 view->base.reference.count = 1; 1355 view->base.context = pipe; 1356 view->base.texture = NULL; 1357 pipe_resource_reference(&view->base.texture, texture); 1358 1359 view->swizzle[0] = templ->swizzle_r; 1360 view->swizzle[1] = templ->swizzle_g; 1361 view->swizzle[2] = templ->swizzle_b; 1362 view->swizzle[3] = templ->swizzle_a; 1363 1364 view->format = tex->tx_format; 1365 view->format.format1 |= r300_translate_texformat(templ->format, 1366 view->swizzle, 1367 is_r500, 1368 dxtc_swizzle); 1369 if (is_r500) { 1370 view->format.format2 |= r500_tx_format_msb_bit(templ->format); 1371 } 1372 } 1373 1374 return (struct pipe_sampler_view*)view; 1375} 1376 1377static void 1378r300_sampler_view_destroy(struct pipe_context *pipe, 1379 struct pipe_sampler_view *view) 1380{ 1381 pipe_resource_reference(&view->texture, NULL); 1382 FREE(view); 1383} 1384 1385static void r300_set_scissor_state(struct pipe_context* pipe, 1386 const struct pipe_scissor_state* state) 1387{ 1388 struct r300_context* r300 = r300_context(pipe); 1389 1390 memcpy(r300->scissor_state.state, state, 1391 sizeof(struct pipe_scissor_state)); 1392 1393 
r300_mark_atom_dirty(r300, &r300->scissor_state); 1394} 1395 1396static void r300_set_viewport_state(struct pipe_context* pipe, 1397 const struct pipe_viewport_state* state) 1398{ 1399 struct r300_context* r300 = r300_context(pipe); 1400 struct r300_viewport_state* viewport = 1401 (struct r300_viewport_state*)r300->viewport_state.state; 1402 1403 r300->viewport = *state; 1404 1405 if (r300->draw) { 1406 draw_set_viewport_state(r300->draw, state); 1407 viewport->vte_control = R300_VTX_XY_FMT | R300_VTX_Z_FMT; 1408 return; 1409 } 1410 1411 /* Do the transform in HW. */ 1412 viewport->vte_control = R300_VTX_W0_FMT; 1413 1414 if (state->scale[0] != 1.0f) { 1415 viewport->xscale = state->scale[0]; 1416 viewport->vte_control |= R300_VPORT_X_SCALE_ENA; 1417 } 1418 if (state->scale[1] != 1.0f) { 1419 viewport->yscale = state->scale[1]; 1420 viewport->vte_control |= R300_VPORT_Y_SCALE_ENA; 1421 } 1422 if (state->scale[2] != 1.0f) { 1423 viewport->zscale = state->scale[2]; 1424 viewport->vte_control |= R300_VPORT_Z_SCALE_ENA; 1425 } 1426 if (state->translate[0] != 0.0f) { 1427 viewport->xoffset = state->translate[0]; 1428 viewport->vte_control |= R300_VPORT_X_OFFSET_ENA; 1429 } 1430 if (state->translate[1] != 0.0f) { 1431 viewport->yoffset = state->translate[1]; 1432 viewport->vte_control |= R300_VPORT_Y_OFFSET_ENA; 1433 } 1434 if (state->translate[2] != 0.0f) { 1435 viewport->zoffset = state->translate[2]; 1436 viewport->vte_control |= R300_VPORT_Z_OFFSET_ENA; 1437 } 1438 1439 r300_mark_atom_dirty(r300, &r300->viewport_state); 1440 if (r300->fs.state && r300_fs(r300)->shader->inputs.wpos != ATTR_UNUSED) { 1441 r300_mark_atom_dirty(r300, &r300->fs_rc_constant_state); 1442 } 1443} 1444 1445static void r300_set_vertex_buffers(struct pipe_context* pipe, 1446 unsigned count, 1447 const struct pipe_vertex_buffer* buffers) 1448{ 1449 struct r300_context* r300 = r300_context(pipe); 1450 struct pipe_vertex_buffer *vbo; 1451 unsigned i, max_index = (1 << 24) - 1; 1452 boolean any_user_buffer = FALSE; 1453 struct pipe_vertex_buffer dummy_vb = {0}; 1454 1455 /* There must be at least one vertex buffer set, otherwise it locks up. */ 1456 if (!count) { 1457 dummy_vb.buffer = r300->dummy_vb; 1458 dummy_vb.max_index = r300->dummy_vb->width0 / 4; 1459 buffers = &dummy_vb; 1460 count = 1; 1461 } 1462 1463 if (count == r300->vertex_buffer_count && 1464 memcmp(r300->vertex_buffer, buffers, 1465 sizeof(struct pipe_vertex_buffer) * count) == 0) { 1466 return; 1467 } 1468 1469 if (r300->screen->caps.has_tcl) { 1470 /* HW TCL. */ 1471 r300->incompatible_vb_layout = FALSE; 1472 1473 /* Check if the strides and offsets are aligned to the size of DWORD. */ 1474 for (i = 0; i < count; i++) { 1475 if (buffers[i].buffer) { 1476 if (buffers[i].stride % 4 != 0 || 1477 buffers[i].buffer_offset % 4 != 0) { 1478 r300->incompatible_vb_layout = TRUE; 1479 break; 1480 } 1481 } 1482 } 1483 1484 for (i = 0; i < count; i++) { 1485 /* Why, yes, I AM casting away constness. How did you know? */ 1486 vbo = (struct pipe_vertex_buffer*)&buffers[i]; 1487 1488 /* Skip NULL buffers */ 1489 if (!buffers[i].buffer) { 1490 continue; 1491 } 1492 1493 if (r300_buffer_is_user_buffer(vbo->buffer)) { 1494 any_user_buffer = TRUE; 1495 } 1496 1497 /* The stride of zero means we will be fetching only the first 1498 * vertex, so don't care about max_index. 
*/ 1499 if (!vbo->stride) 1500 continue; 1501 1502 if (vbo->max_index == ~0) { 1503 vbo->max_index = 1504 (vbo->buffer->width0 - vbo->buffer_offset) / vbo->stride; 1505 } 1506 1507 max_index = MIN2(vbo->max_index, max_index); 1508 } 1509 1510 r300->any_user_vbs = any_user_buffer; 1511 r300->vertex_buffer_max_index = max_index; 1512 r300->aos_dirty = TRUE; 1513 r300->validate_buffers = TRUE; 1514 } else { 1515 /* SW TCL. */ 1516 draw_set_vertex_buffers(r300->draw, count, buffers); 1517 } 1518 1519 /* Common code. */ 1520 for (i = 0; i < count; i++) { 1521 /* Reference our buffer. */ 1522 pipe_resource_reference(&r300->vertex_buffer[i].buffer, buffers[i].buffer); 1523 } 1524 for (; i < r300->vertex_buffer_count; i++) { 1525 /* Dereference any old buffers. */ 1526 pipe_resource_reference(&r300->vertex_buffer[i].buffer, NULL); 1527 } 1528 1529 memcpy(r300->vertex_buffer, buffers, 1530 sizeof(struct pipe_vertex_buffer) * count); 1531 r300->vertex_buffer_count = count; 1532} 1533 1534static void r300_set_index_buffer(struct pipe_context* pipe, 1535 const struct pipe_index_buffer *ib) 1536{ 1537 struct r300_context* r300 = r300_context(pipe); 1538 1539 if (ib) { 1540 pipe_resource_reference(&r300->index_buffer.buffer, ib->buffer); 1541 memcpy(&r300->index_buffer, ib, sizeof(r300->index_buffer)); 1542 } 1543 else { 1544 pipe_resource_reference(&r300->index_buffer.buffer, NULL); 1545 memset(&r300->index_buffer, 0, sizeof(r300->index_buffer)); 1546 } 1547 1548 if (r300->screen->caps.has_tcl) { 1549 r300->validate_buffers = TRUE; 1550 } 1551 else { 1552 draw_set_index_buffer(r300->draw, ib); 1553 } 1554} 1555 1556/* Initialize the PSC tables. */ 1557static void r300_vertex_psc(struct r300_vertex_element_state *velems) 1558{ 1559 struct r300_vertex_stream_state *vstream = &velems->vertex_stream; 1560 uint16_t type, swizzle; 1561 enum pipe_format format; 1562 unsigned i; 1563 1564 if (velems->count > 16) { 1565 fprintf(stderr, "r300: More than 16 vertex elements are not supported," 1566 " requested %i, using 16.\n", velems->count); 1567 velems->count = 16; 1568 } 1569 1570 /* Vertex shaders have no semantics on their inputs, 1571 * so PSC should just route stuff based on the vertex elements, 1572 * and not on attrib information. */ 1573 for (i = 0; i < velems->count; i++) { 1574 format = velems->hw_format[i]; 1575 1576 type = r300_translate_vertex_data_type(format); 1577 if (type == R300_INVALID_FORMAT) { 1578 fprintf(stderr, "r300: Bad vertex format %s.\n", 1579 util_format_short_name(format)); 1580 assert(0); 1581 abort(); 1582 } 1583 1584 type |= i << R300_DST_VEC_LOC_SHIFT; 1585 swizzle = r300_translate_vertex_data_swizzle(format); 1586 1587 if (i & 1) { 1588 vstream->vap_prog_stream_cntl[i >> 1] |= type << 16; 1589 vstream->vap_prog_stream_cntl_ext[i >> 1] |= swizzle << 16; 1590 } else { 1591 vstream->vap_prog_stream_cntl[i >> 1] |= type; 1592 vstream->vap_prog_stream_cntl_ext[i >> 1] |= swizzle; 1593 } 1594 } 1595 1596 /* Set the last vector in the PSC. */ 1597 if (i) { 1598 i -= 1; 1599 } 1600 vstream->vap_prog_stream_cntl[i >> 1] |= 1601 (R300_LAST_VEC << (i & 1 ? 
16 : 0)); 1602 1603 vstream->count = (i >> 1) + 1; 1604} 1605 1606#define FORMAT_REPLACE(what, withwhat) \ 1607 case PIPE_FORMAT_##what: *format = PIPE_FORMAT_##withwhat; break 1608 1609static void* r300_create_vertex_elements_state(struct pipe_context* pipe, 1610 unsigned count, 1611 const struct pipe_vertex_element* attribs) 1612{ 1613 struct r300_vertex_element_state *velems; 1614 unsigned i; 1615 enum pipe_format *format; 1616 struct pipe_vertex_element dummy_attrib = {0}; 1617 1618 /* R300 Programmable Stream Control (PSC) doesn't support 0 vertex elements. */ 1619 if (!count) { 1620 dummy_attrib.src_format = PIPE_FORMAT_R8G8B8A8_UNORM; 1621 attribs = &dummy_attrib; 1622 count = 1; 1623 } 1624 1625 assert(count <= PIPE_MAX_ATTRIBS); 1626 velems = CALLOC_STRUCT(r300_vertex_element_state); 1627 if (velems != NULL) { 1628 velems->count = count; 1629 memcpy(velems->velem, attribs, sizeof(struct pipe_vertex_element) * count); 1630 1631 if (r300_screen(pipe->screen)->caps.has_tcl) { 1632 /* Set the best hw format in case the original format is not 1633 * supported by hw. */ 1634 for (i = 0; i < count; i++) { 1635 velems->hw_format[i] = velems->velem[i].src_format; 1636 format = &velems->hw_format[i]; 1637 1638 /* This is basically the list of unsupported formats. 1639 * For now we don't care about the alignment, that's going to 1640 * be sorted out after the PSC setup. */ 1641 switch (*format) { 1642 FORMAT_REPLACE(R64_FLOAT, R32_FLOAT); 1643 FORMAT_REPLACE(R64G64_FLOAT, R32G32_FLOAT); 1644 FORMAT_REPLACE(R64G64B64_FLOAT, R32G32B32_FLOAT); 1645 FORMAT_REPLACE(R64G64B64A64_FLOAT, R32G32B32A32_FLOAT); 1646 1647 FORMAT_REPLACE(R32_UNORM, R32_FLOAT); 1648 FORMAT_REPLACE(R32G32_UNORM, R32G32_FLOAT); 1649 FORMAT_REPLACE(R32G32B32_UNORM, R32G32B32_FLOAT); 1650 FORMAT_REPLACE(R32G32B32A32_UNORM, R32G32B32A32_FLOAT); 1651 1652 FORMAT_REPLACE(R32_USCALED, R32_FLOAT); 1653 FORMAT_REPLACE(R32G32_USCALED, R32G32_FLOAT); 1654 FORMAT_REPLACE(R32G32B32_USCALED, R32G32B32_FLOAT); 1655 FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT); 1656 1657 FORMAT_REPLACE(R32_SNORM, R32_FLOAT); 1658 FORMAT_REPLACE(R32G32_SNORM, R32G32_FLOAT); 1659 FORMAT_REPLACE(R32G32B32_SNORM, R32G32B32_FLOAT); 1660 FORMAT_REPLACE(R32G32B32A32_SNORM, R32G32B32A32_FLOAT); 1661 1662 FORMAT_REPLACE(R32_SSCALED, R32_FLOAT); 1663 FORMAT_REPLACE(R32G32_SSCALED, R32G32_FLOAT); 1664 FORMAT_REPLACE(R32G32B32_SSCALED, R32G32B32_FLOAT); 1665 FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT); 1666 1667 FORMAT_REPLACE(R32_FIXED, R32_FLOAT); 1668 FORMAT_REPLACE(R32G32_FIXED, R32G32_FLOAT); 1669 FORMAT_REPLACE(R32G32B32_FIXED, R32G32B32_FLOAT); 1670 FORMAT_REPLACE(R32G32B32A32_FIXED, R32G32B32A32_FLOAT); 1671 1672 default:; 1673 } 1674 1675 velems->incompatible_layout = 1676 velems->incompatible_layout || 1677 velems->velem[i].src_format != velems->hw_format[i] || 1678 velems->velem[i].src_offset % 4 != 0; 1679 } 1680 1681 /* Now setup PSC. 1682 * The unused components will be replaced by (..., 0, 1). */ 1683 r300_vertex_psc(velems); 1684 1685 /* Align the formats to the size of DWORD. 1686 * We only care about the blocksizes of the formats since 1687 * swizzles are already set up. 1688 * Also compute the vertex size. */ 1689 for (i = 0; i < count; i++) { 1690 /* This is OK because we check for aligned strides too 1691 * elsewhere. 
            for (i = 0; i < count; i++) {
                /* This is OK because we check for aligned strides too
                 * elsewhere. */
                velems->hw_format_size[i] =
                    align(util_format_get_blocksize(velems->hw_format[i]), 4);
                velems->vertex_size_dwords += velems->hw_format_size[i] / 4;
            }
        }
    }
    return velems;
}

static void r300_bind_vertex_elements_state(struct pipe_context *pipe,
                                            void *state)
{
    struct r300_context *r300 = r300_context(pipe);
    struct r300_vertex_element_state *velems = state;

    if (velems == NULL) {
        return;
    }

    r300->velems = velems;

    if (r300->draw) {
        draw_set_vertex_elements(r300->draw, velems->count, velems->velem);
        return;
    }

    UPDATE_STATE(&velems->vertex_stream, r300->vertex_stream_state);
    r300->vertex_stream_state.size = (1 + velems->vertex_stream.count) * 2;
    r300->aos_dirty = TRUE;
}

static void r300_delete_vertex_elements_state(struct pipe_context *pipe, void *state)
{
    FREE(state);
}

static void* r300_create_vs_state(struct pipe_context* pipe,
                                  const struct pipe_shader_state* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = CALLOC_STRUCT(r300_vertex_shader);

    /* Copy state directly into shader. */
    vs->state = *shader;
    vs->state.tokens = tgsi_dup_tokens(shader->tokens);

    if (r300->screen->caps.has_tcl) {
        r300_init_vs_outputs(vs);
        r300_translate_vertex_shader(r300, vs);
    } else {
        r300_draw_init_vertex_shader(r300->draw, vs);
    }

    return vs;
}

static void r300_bind_vs_state(struct pipe_context* pipe, void* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;

    if (vs == NULL) {
        r300->vs_state.state = NULL;
        return;
    }
    if (vs == r300->vs_state.state) {
        return;
    }
    r300->vs_state.state = vs;

    /* The majority of the RS block bits is dependent on the vertex shader. */
    r300_mark_atom_dirty(r300, &r300->rs_block_state); /* Will be updated before the emission. */

    if (r300->screen->caps.has_tcl) {
        unsigned fc_op_dwords = r300->screen->caps.is_r500 ? 3 : 2;
        r300_mark_atom_dirty(r300, &r300->vs_state);
        r300->vs_state.size =
                vs->code.length + 9 +
                (vs->code.num_fc_ops ? vs->code.num_fc_ops * fc_op_dwords + 4 : 0);

        r300_mark_atom_dirty(r300, &r300->vs_constants);
        r300->vs_constants.size =
                2 +
                (vs->externals_count ? vs->externals_count * 4 + 3 : 0) +
                (vs->immediates_count ? vs->immediates_count * 4 + 3 : 0);
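        /* I.e. a fixed 2 dwords, plus 4 dwords per constant vector and
         * 3 extra dwords for each group (externals, immediates) that is
         * non-empty; e.g. 4 externals and no immediates give
         * 2 + (4*4 + 3) = 21 dwords. */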
        ((struct r300_constant_buffer*)r300->vs_constants.state)->remap_table =
                vs->code.constants_remap_table;

        r300_mark_atom_dirty(r300, &r300->pvs_flush);
    } else {
        draw_bind_vertex_shader(r300->draw,
                (struct draw_vertex_shader*)vs->draw_vs);
    }
}

static void r300_delete_vs_state(struct pipe_context* pipe, void* shader)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;

    if (r300->screen->caps.has_tcl) {
        rc_constants_destroy(&vs->code.constants);
        if (vs->code.constants_remap_table)
            FREE(vs->code.constants_remap_table);
    } else {
        draw_delete_vertex_shader(r300->draw,
                (struct draw_vertex_shader*)vs->draw_vs);
    }

    FREE((void*)vs->state.tokens);
    FREE(shader);
}

static void r300_set_constant_buffer(struct pipe_context *pipe,
                                     uint shader, uint index,
                                     struct pipe_resource *buf)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_constant_buffer *cbuf;
    uint32_t *mapped;

    switch (shader) {
        case PIPE_SHADER_VERTEX:
            cbuf = (struct r300_constant_buffer*)r300->vs_constants.state;
            break;
        case PIPE_SHADER_FRAGMENT:
            cbuf = (struct r300_constant_buffer*)r300->fs_constants.state;
            break;
        default:
            return;
    }

    if (buf == NULL || buf->width0 == 0 ||
        (mapped = (uint32_t*)r300_buffer(buf)->constant_buffer) == NULL) {
        return;
    }

    if (shader == PIPE_SHADER_FRAGMENT ||
        (shader == PIPE_SHADER_VERTEX && r300->screen->caps.has_tcl)) {
        assert((buf->width0 % (4 * sizeof(float))) == 0);
        cbuf->ptr = mapped;
    }

    if (shader == PIPE_SHADER_VERTEX) {
        if (r300->screen->caps.has_tcl) {
            struct r300_vertex_shader *vs =
                    (struct r300_vertex_shader*)r300->vs_state.state;

            if (!vs) {
                cbuf->buffer_base = 0;
                return;
            }

            cbuf->buffer_base = r300->vs_const_base;
            r300->vs_const_base += vs->code.constants.Count;
            if (r300->vs_const_base > R500_MAX_PVS_CONST_VECS) {
                r300->vs_const_base = vs->code.constants.Count;
                cbuf->buffer_base = 0;
                r300_mark_atom_dirty(r300, &r300->pvs_flush);
            }
            r300_mark_atom_dirty(r300, &r300->vs_constants);
        } else if (r300->draw) {
            draw_set_mapped_constant_buffer(r300->draw, PIPE_SHADER_VERTEX,
                    0, mapped, buf->width0);
        }
    } else if (shader == PIPE_SHADER_FRAGMENT) {
        r300_mark_atom_dirty(r300, &r300->fs_constants);
    }
}

void r300_init_state_functions(struct r300_context* r300)
{
    r300->context.create_blend_state = r300_create_blend_state;
    r300->context.bind_blend_state = r300_bind_blend_state;
    r300->context.delete_blend_state = r300_delete_blend_state;

    r300->context.set_blend_color = r300_set_blend_color;

    r300->context.set_clip_state = r300_set_clip_state;
    r300->context.set_sample_mask = r300_set_sample_mask;

    r300->context.set_constant_buffer = r300_set_constant_buffer;

    r300->context.create_depth_stencil_alpha_state = r300_create_dsa_state;
    r300->context.bind_depth_stencil_alpha_state = r300_bind_dsa_state;
    r300->context.delete_depth_stencil_alpha_state = r300_delete_dsa_state;

    r300->context.set_stencil_ref = r300_set_stencil_ref;

    r300->context.set_framebuffer_state = r300_set_framebuffer_state;

    r300->context.create_fs_state = r300_create_fs_state;
    r300->context.bind_fs_state = r300_bind_fs_state;
    r300->context.delete_fs_state = r300_delete_fs_state;

    r300->context.set_polygon_stipple = r300_set_polygon_stipple;

    r300->context.create_rasterizer_state = r300_create_rs_state;
    r300->context.bind_rasterizer_state = r300_bind_rs_state;
    r300->context.delete_rasterizer_state = r300_delete_rs_state;

    r300->context.create_sampler_state = r300_create_sampler_state;
    r300->context.bind_fragment_sampler_states = r300_bind_sampler_states;
    r300->context.bind_vertex_sampler_states = r300_lacks_vertex_textures;
    r300->context.delete_sampler_state = r300_delete_sampler_state;

    r300->context.set_fragment_sampler_views = r300_set_fragment_sampler_views;
    r300->context.create_sampler_view = r300_create_sampler_view;
    r300->context.sampler_view_destroy = r300_sampler_view_destroy;

    r300->context.set_scissor_state = r300_set_scissor_state;

    r300->context.set_viewport_state = r300_set_viewport_state;

    r300->context.set_vertex_buffers = r300_set_vertex_buffers;
    r300->context.set_index_buffer = r300_set_index_buffer;

    r300->context.create_vertex_elements_state = r300_create_vertex_elements_state;
    r300->context.bind_vertex_elements_state = r300_bind_vertex_elements_state;
    r300->context.delete_vertex_elements_state = r300_delete_vertex_elements_state;

    r300->context.create_vs_state = r300_create_vs_state;
    r300->context.bind_vs_state = r300_bind_vs_state;
    r300->context.delete_vs_state = r300_delete_vs_state;
}
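
/* The hooks installed above are reached through the generic Gallium
 * pipe_context interface.  A state tracker typically drives them roughly
 * like this (sketch only; shader_templ is a placeholder pipe_shader_state,
 * not r300-specific code):
 *
 *    void *vs = pipe->create_vs_state(pipe, &shader_templ);
 *    pipe->bind_vs_state(pipe, vs);
 *    ... draw calls ...
 *    pipe->bind_vs_state(pipe, NULL);
 *    pipe->delete_vs_state(pipe, vs);
 */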