/* xorg_exa_tgsi.c — revision 2cfbbc76e445d88bdac7dd4dd22aaf36bbc8e4cc */
1#include "xorg_exa_tgsi.h" 2 3/*### stupidity defined in X11/extensions/XI.h */ 4#undef Absolute 5 6#include "pipe/p_format.h" 7#include "pipe/p_context.h" 8#include "pipe/p_state.h" 9#include "pipe/p_inlines.h" 10#include "pipe/p_shader_tokens.h" 11 12#include "util/u_memory.h" 13#include "util/u_simple_shaders.h" 14 15#include "tgsi/tgsi_ureg.h" 16 17#include "cso_cache/cso_context.h" 18#include "cso_cache/cso_hash.h" 19 20/* Vertex shader: 21 * IN[0] = vertex pos 22 * IN[1] = src tex coord | solid fill color 23 * IN[2] = mask tex coord 24 * IN[3] = dst tex coord 25 * CONST[0] = (2/dst_width, 2/dst_height, 1, 1) 26 * CONST[1] = (-1, -1, 0, 0) 27 * 28 * OUT[0] = vertex pos 29 * OUT[1] = src tex coord | solid fill color 30 * OUT[2] = mask tex coord 31 * OUT[3] = dst tex coord 32 */ 33 34/* Fragment shader: 35 * SAMP[0] = src 36 * SAMP[1] = mask 37 * SAMP[2] = dst 38 * IN[0] = pos src | solid fill color 39 * IN[1] = pos mask 40 * IN[2] = pos dst 41 * CONST[0] = (0, 0, 0, 1) 42 * 43 * OUT[0] = color 44 */ 45 46struct xorg_shaders { 47 struct xorg_renderer *r; 48 49 struct cso_hash *vs_hash; 50 struct cso_hash *fs_hash; 51}; 52 53static INLINE void 54src_in_mask(struct ureg_program *ureg, 55 struct ureg_dst dst, 56 struct ureg_src src, 57 struct ureg_src mask, 58 int component_alpha) 59{ 60 if (component_alpha == FS_CA_FULL) { 61 ureg_MUL(ureg, dst, src, mask); 62 } else if (component_alpha == FS_CA_SRCALPHA) { 63 ureg_MUL(ureg, dst, 64 ureg_scalar(src, TGSI_SWIZZLE_W), mask); 65 } 66 else { 67 ureg_MUL(ureg, dst, src, 68 ureg_scalar(mask, TGSI_SWIZZLE_X)); 69 } 70} 71 72static struct ureg_src 73vs_normalize_coords(struct ureg_program *ureg, struct ureg_src coords, 74 struct ureg_src const0, struct ureg_src const1) 75{ 76 struct ureg_dst tmp = ureg_DECL_temporary(ureg); 77 struct ureg_src ret; 78 ureg_MAD(ureg, tmp, coords, const0, const1); 79 ret = ureg_src(tmp); 80 ureg_release_temporary(ureg, tmp); 81 return ret; 82} 83 84static void 85linear_gradient(struct 
ureg_program *ureg, 86 struct ureg_dst out, 87 struct ureg_src pos, 88 struct ureg_src sampler, 89 struct ureg_src coords, 90 struct ureg_src const0124, 91 struct ureg_src matrow0, 92 struct ureg_src matrow1, 93 struct ureg_src matrow2) 94{ 95 struct ureg_dst temp0 = ureg_DECL_temporary(ureg); 96 struct ureg_dst temp1 = ureg_DECL_temporary(ureg); 97 struct ureg_dst temp2 = ureg_DECL_temporary(ureg); 98 struct ureg_dst temp3 = ureg_DECL_temporary(ureg); 99 struct ureg_dst temp4 = ureg_DECL_temporary(ureg); 100 struct ureg_dst temp5 = ureg_DECL_temporary(ureg); 101 102 ureg_MOV(ureg, 103 ureg_writemask(temp0, TGSI_WRITEMASK_XY), pos); 104 ureg_MOV(ureg, 105 ureg_writemask(temp0, TGSI_WRITEMASK_Z), 106 ureg_scalar(const0124, TGSI_SWIZZLE_Y)); 107 108 ureg_DP3(ureg, temp1, matrow0, ureg_src(temp0)); 109 ureg_DP3(ureg, temp2, matrow1, ureg_src(temp0)); 110 ureg_DP3(ureg, temp3, matrow2, ureg_src(temp0)); 111 ureg_RCP(ureg, temp3, ureg_src(temp3)); 112 ureg_MUL(ureg, temp1, ureg_src(temp1), ureg_src(temp3)); 113 ureg_MUL(ureg, temp2, ureg_src(temp2), ureg_src(temp3)); 114 115 ureg_MOV(ureg, ureg_writemask(temp4, TGSI_WRITEMASK_X), 116 ureg_src(temp1)); 117 ureg_MOV(ureg, ureg_writemask(temp4, TGSI_WRITEMASK_Y), 118 ureg_src(temp2)); 119 120 ureg_MUL(ureg, temp0, 121 ureg_scalar(coords, TGSI_SWIZZLE_Y), 122 ureg_scalar(ureg_src(temp4), TGSI_SWIZZLE_Y)); 123 ureg_MAD(ureg, temp1, 124 ureg_scalar(coords, TGSI_SWIZZLE_X), 125 ureg_scalar(ureg_src(temp4), TGSI_SWIZZLE_X), 126 ureg_src(temp0)); 127 128 ureg_MUL(ureg, temp2, 129 ureg_src(temp1), 130 ureg_scalar(coords, TGSI_SWIZZLE_Z)); 131 132 ureg_TEX(ureg, out, 133 TGSI_TEXTURE_1D, ureg_src(temp2), sampler); 134 135 ureg_release_temporary(ureg, temp0); 136 ureg_release_temporary(ureg, temp1); 137 ureg_release_temporary(ureg, temp2); 138 ureg_release_temporary(ureg, temp3); 139 ureg_release_temporary(ureg, temp4); 140 ureg_release_temporary(ureg, temp5); 141} 142 143 144static void 145radial_gradient(struct ureg_program 
*ureg, 146 struct ureg_dst out, 147 struct ureg_src pos, 148 struct ureg_src sampler, 149 struct ureg_src coords, 150 struct ureg_src const0124, 151 struct ureg_src matrow0, 152 struct ureg_src matrow1, 153 struct ureg_src matrow2) 154{ 155 struct ureg_dst temp0 = ureg_DECL_temporary(ureg); 156 struct ureg_dst temp1 = ureg_DECL_temporary(ureg); 157 struct ureg_dst temp2 = ureg_DECL_temporary(ureg); 158 struct ureg_dst temp3 = ureg_DECL_temporary(ureg); 159 struct ureg_dst temp4 = ureg_DECL_temporary(ureg); 160 struct ureg_dst temp5 = ureg_DECL_temporary(ureg); 161 162 ureg_MOV(ureg, 163 ureg_writemask(temp0, TGSI_WRITEMASK_XY), 164 pos); 165 ureg_MOV(ureg, 166 ureg_writemask(temp0, TGSI_WRITEMASK_Z), 167 ureg_scalar(const0124, TGSI_SWIZZLE_Y)); 168 169 ureg_DP3(ureg, temp1, matrow0, ureg_src(temp0)); 170 ureg_DP3(ureg, temp2, matrow1, ureg_src(temp0)); 171 ureg_DP3(ureg, temp3, matrow2, ureg_src(temp0)); 172 ureg_RCP(ureg, temp3, ureg_src(temp3)); 173 ureg_MUL(ureg, temp1, ureg_src(temp1), ureg_src(temp3)); 174 ureg_MUL(ureg, temp2, ureg_src(temp2), ureg_src(temp3)); 175 176 ureg_MOV(ureg, ureg_writemask(temp5, TGSI_WRITEMASK_X), 177 ureg_src(temp1)); 178 ureg_MOV(ureg, ureg_writemask(temp5, TGSI_WRITEMASK_Y), 179 ureg_src(temp2)); 180 181 ureg_MUL(ureg, temp0, ureg_scalar(coords, TGSI_SWIZZLE_Y), 182 ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_Y)); 183 ureg_MAD(ureg, temp1, 184 ureg_scalar(coords, TGSI_SWIZZLE_X), 185 ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_X), 186 ureg_src(temp0)); 187 ureg_ADD(ureg, temp1, 188 ureg_src(temp1), ureg_src(temp1)); 189 ureg_MUL(ureg, temp3, 190 ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_Y), 191 ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_Y)); 192 ureg_MAD(ureg, temp4, 193 ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_X), 194 ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_X), 195 ureg_src(temp3)); 196 ureg_MOV(ureg, temp4, ureg_negate(ureg_src(temp4))); 197 ureg_MUL(ureg, temp2, 198 ureg_scalar(coords, TGSI_SWIZZLE_Z), 199 ureg_src(temp4)); 200 
ureg_MUL(ureg, temp0, 201 ureg_scalar(const0124, TGSI_SWIZZLE_W), 202 ureg_src(temp2)); 203 ureg_MUL(ureg, temp3, 204 ureg_src(temp1), ureg_src(temp1)); 205 ureg_SUB(ureg, temp2, 206 ureg_src(temp3), ureg_src(temp0)); 207 ureg_RSQ(ureg, temp2, ureg_abs(ureg_src(temp2))); 208 ureg_RCP(ureg, temp2, ureg_src(temp2)); 209 ureg_SUB(ureg, temp1, 210 ureg_src(temp2), ureg_src(temp1)); 211 ureg_ADD(ureg, temp0, 212 ureg_scalar(coords, TGSI_SWIZZLE_Z), 213 ureg_scalar(coords, TGSI_SWIZZLE_Z)); 214 ureg_RCP(ureg, temp0, ureg_src(temp0)); 215 ureg_MUL(ureg, temp2, 216 ureg_src(temp1), ureg_src(temp0)); 217 ureg_TEX(ureg, out, TGSI_TEXTURE_1D, 218 ureg_src(temp2), sampler); 219 220 ureg_release_temporary(ureg, temp0); 221 ureg_release_temporary(ureg, temp1); 222 ureg_release_temporary(ureg, temp2); 223 ureg_release_temporary(ureg, temp3); 224 ureg_release_temporary(ureg, temp4); 225 ureg_release_temporary(ureg, temp5); 226} 227 228static void * 229create_vs(struct pipe_context *pipe, 230 unsigned vs_traits) 231{ 232 struct ureg_program *ureg; 233 struct ureg_src src; 234 struct ureg_dst dst; 235 struct ureg_src const0, const1; 236 boolean is_fill = (vs_traits & VS_FILL) != 0; 237 boolean is_composite = (vs_traits & VS_COMPOSITE) != 0; 238 boolean has_mask = (vs_traits & VS_MASK) != 0; 239 boolean is_yuv = (vs_traits & VS_YUV) != 0; 240 unsigned input_slot = 0; 241 242 ureg = ureg_create(TGSI_PROCESSOR_VERTEX); 243 if (ureg == NULL) 244 return 0; 245 246 const0 = ureg_DECL_constant(ureg, 0); 247 const1 = ureg_DECL_constant(ureg, 1); 248 249 /* it has to be either a fill or a composite op */ 250 debug_assert((is_fill ^ is_composite) ^ is_yuv); 251 252 src = ureg_DECL_vs_input(ureg, input_slot++); 253 dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0); 254 src = vs_normalize_coords(ureg, src, 255 const0, const1); 256 ureg_MOV(ureg, dst, src); 257 258 if (is_yuv) { 259 src = ureg_DECL_vs_input(ureg, input_slot++); 260 dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0); 
261 ureg_MOV(ureg, dst, src); 262 } 263 264 if (is_composite) { 265 src = ureg_DECL_vs_input(ureg, input_slot++); 266 dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0); 267 ureg_MOV(ureg, dst, src); 268 } 269 270 if (is_fill) { 271 src = ureg_DECL_vs_input(ureg, input_slot++); 272 dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0); 273 ureg_MOV(ureg, dst, src); 274 } 275 276 if (has_mask) { 277 src = ureg_DECL_vs_input(ureg, input_slot++); 278 dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 1); 279 ureg_MOV(ureg, dst, src); 280 } 281 282 ureg_END(ureg); 283 284 return ureg_create_shader_and_destroy(ureg, pipe); 285} 286 287static void * 288create_yuv_shader(struct pipe_context *pipe, struct ureg_program *ureg) 289{ 290 struct ureg_src y_sampler, u_sampler, v_sampler; 291 struct ureg_src pos; 292 struct ureg_src matrow0, matrow1, matrow2; 293 struct ureg_dst y, u, v, rgb; 294 struct ureg_dst out = ureg_DECL_output(ureg, 295 TGSI_SEMANTIC_COLOR, 296 0); 297 298 pos = ureg_DECL_fs_input(ureg, 299 TGSI_SEMANTIC_GENERIC, 300 0, 301 TGSI_INTERPOLATE_PERSPECTIVE); 302 303 rgb = ureg_DECL_temporary(ureg); 304 y = ureg_DECL_temporary(ureg); 305 u = ureg_DECL_temporary(ureg); 306 v = ureg_DECL_temporary(ureg); 307 308 y_sampler = ureg_DECL_sampler(ureg, 0); 309 u_sampler = ureg_DECL_sampler(ureg, 1); 310 v_sampler = ureg_DECL_sampler(ureg, 2); 311 312 matrow0 = ureg_DECL_constant(ureg, 0); 313 matrow1 = ureg_DECL_constant(ureg, 1); 314 matrow2 = ureg_DECL_constant(ureg, 2); 315 316 ureg_TEX(ureg, y, 317 TGSI_TEXTURE_2D, pos, y_sampler); 318 ureg_TEX(ureg, u, 319 TGSI_TEXTURE_2D, pos, u_sampler); 320 ureg_TEX(ureg, v, 321 TGSI_TEXTURE_2D, pos, v_sampler); 322 323 ureg_SUB(ureg, u, ureg_src(u), 324 ureg_scalar(matrow0, TGSI_SWIZZLE_W)); 325 ureg_SUB(ureg, v, ureg_src(v), 326 ureg_scalar(matrow0, TGSI_SWIZZLE_W)); 327 328 ureg_MUL(ureg, rgb, 329 ureg_scalar(ureg_src(y), TGSI_SWIZZLE_X), 330 matrow0); 331 ureg_MAD(ureg, rgb, 332 ureg_scalar(ureg_src(u), 
TGSI_SWIZZLE_X), 333 matrow1, 334 ureg_src(rgb)); 335 ureg_MAD(ureg, rgb, 336 ureg_scalar(ureg_src(v), TGSI_SWIZZLE_X), 337 matrow2, 338 ureg_src(rgb)); 339 340 /* rgb.a = 1; */ 341 ureg_MOV(ureg, ureg_writemask(rgb, TGSI_WRITEMASK_W), 342 ureg_scalar(matrow0, TGSI_SWIZZLE_X)); 343 344 ureg_MOV(ureg, out, ureg_src(rgb)); 345 346 ureg_release_temporary(ureg, rgb); 347 ureg_release_temporary(ureg, y); 348 ureg_release_temporary(ureg, u); 349 ureg_release_temporary(ureg, v); 350 351 ureg_END(ureg); 352 353 return ureg_create_shader_and_destroy(ureg, pipe); 354} 355 356 357static INLINE void 358xrender_tex(struct ureg_program *ureg, 359 struct ureg_dst dst, 360 struct ureg_src coords, 361 struct ureg_src sampler, 362 boolean repeat_none) 363{ 364 if (repeat_none) { 365 struct ureg_dst tmp0 = ureg_DECL_temporary(ureg); 366 struct ureg_dst tmp1 = ureg_DECL_temporary(ureg); 367 struct ureg_src const0 = ureg_DECL_constant(ureg, 0); 368 unsigned label; 369 ureg_SLT(ureg, tmp1, ureg_swizzle(coords, 370 TGSI_SWIZZLE_X, 371 TGSI_SWIZZLE_Y, 372 TGSI_SWIZZLE_X, 373 TGSI_SWIZZLE_Y), 374 ureg_scalar(const0, TGSI_SWIZZLE_X)); 375 ureg_SGT(ureg, tmp0, ureg_swizzle(coords, 376 TGSI_SWIZZLE_X, 377 TGSI_SWIZZLE_Y, 378 TGSI_SWIZZLE_X, 379 TGSI_SWIZZLE_Y), 380 ureg_scalar(const0, TGSI_SWIZZLE_W)); 381 ureg_MAX(ureg, tmp0, ureg_src(tmp0), ureg_src(tmp1)); 382 ureg_MAX(ureg, tmp0, ureg_scalar(ureg_src(tmp0), TGSI_SWIZZLE_X), 383 ureg_scalar(ureg_src(tmp0), TGSI_SWIZZLE_Y)); 384 label = ureg_get_instruction_number(ureg) + 2; 385 ureg_IF(ureg, ureg_src(tmp0), &label); 386 ureg_MOV(ureg, dst, ureg_scalar(const0, TGSI_SWIZZLE_X)); 387 label += 2; 388 ureg_ELSE(ureg, &label); 389 ureg_TEX(ureg, dst, TGSI_TEXTURE_2D, coords, sampler); 390 ureg_ENDIF(ureg); 391 ureg_release_temporary(ureg, tmp0); 392 ureg_release_temporary(ureg, tmp1); 393 } else 394 ureg_TEX(ureg, dst, TGSI_TEXTURE_2D, coords, sampler); 395} 396 397static void * 398create_fs(struct pipe_context *pipe, 399 unsigned fs_traits) 
400{ 401 struct ureg_program *ureg; 402 struct ureg_src /*dst_sampler,*/ src_sampler, mask_sampler; 403 struct ureg_src /*dst_pos,*/ src_input, mask_pos; 404 struct ureg_dst src, mask; 405 struct ureg_dst out; 406 unsigned has_mask = (fs_traits & FS_MASK) != 0; 407 unsigned is_fill = (fs_traits & FS_FILL) != 0; 408 unsigned is_composite = (fs_traits & FS_COMPOSITE) != 0; 409 unsigned is_solid = (fs_traits & FS_SOLID_FILL) != 0; 410 unsigned is_lingrad = (fs_traits & FS_LINGRAD_FILL) != 0; 411 unsigned is_radgrad = (fs_traits & FS_RADGRAD_FILL) != 0; 412 unsigned comp_alpha = (fs_traits & FS_COMPONENT_ALPHA) != 0; 413 unsigned is_yuv = (fs_traits & FS_YUV) != 0; 414 unsigned src_repeat_none = (fs_traits & FS_SRC_REPEAT_NONE) != 0; 415 unsigned mask_repeat_none = (fs_traits & FS_MASK_REPEAT_NONE) != 0; 416 417 ureg = ureg_create(TGSI_PROCESSOR_FRAGMENT); 418 if (ureg == NULL) 419 return 0; 420 421 /* it has to be either a fill, a composite op or a yuv conversion */ 422 debug_assert((is_fill ^ is_composite) ^ is_yuv); 423 424 out = ureg_DECL_output(ureg, 425 TGSI_SEMANTIC_COLOR, 426 0); 427 428 if (is_composite) { 429 src_sampler = ureg_DECL_sampler(ureg, 0); 430 src_input = ureg_DECL_fs_input(ureg, 431 TGSI_SEMANTIC_GENERIC, 432 0, 433 TGSI_INTERPOLATE_PERSPECTIVE); 434 } else if (is_fill) { 435 if (is_solid) 436 src_input = ureg_DECL_fs_input(ureg, 437 TGSI_SEMANTIC_COLOR, 438 0, 439 TGSI_INTERPOLATE_PERSPECTIVE); 440 else 441 src_input = ureg_DECL_fs_input(ureg, 442 TGSI_SEMANTIC_POSITION, 443 0, 444 TGSI_INTERPOLATE_PERSPECTIVE); 445 } else { 446 debug_assert(is_yuv); 447 return create_yuv_shader(pipe, ureg); 448 } 449 450 if (has_mask) { 451 mask_sampler = ureg_DECL_sampler(ureg, 1); 452 mask_pos = ureg_DECL_fs_input(ureg, 453 TGSI_SEMANTIC_GENERIC, 454 1, 455 TGSI_INTERPOLATE_PERSPECTIVE); 456 } 457 458#if 0 /* unused right now */ 459 dst_sampler = ureg_DECL_sampler(ureg, 2); 460 dst_pos = ureg_DECL_fs_input(ureg, 461 TGSI_SEMANTIC_POSITION, 462 2, 463 
TGSI_INTERPOLATE_PERSPECTIVE); 464#endif 465 466 if (is_composite) { 467 if (has_mask) 468 src = ureg_DECL_temporary(ureg); 469 else 470 src = out; 471 xrender_tex(ureg, src, src_input, src_sampler, 472 src_repeat_none); 473 } else if (is_fill) { 474 if (is_solid) { 475 if (has_mask) 476 src = ureg_dst(src_input); 477 else 478 ureg_MOV(ureg, out, src_input); 479 } else if (is_lingrad || is_radgrad) { 480 struct ureg_src coords, const0124, 481 matrow0, matrow1, matrow2; 482 483 if (has_mask) 484 src = ureg_DECL_temporary(ureg); 485 else 486 src = out; 487 488 coords = ureg_DECL_constant(ureg, 0); 489 const0124 = ureg_DECL_constant(ureg, 1); 490 matrow0 = ureg_DECL_constant(ureg, 2); 491 matrow1 = ureg_DECL_constant(ureg, 3); 492 matrow2 = ureg_DECL_constant(ureg, 4); 493 494 if (is_lingrad) { 495 linear_gradient(ureg, src, 496 src_input, src_sampler, 497 coords, const0124, 498 matrow0, matrow1, matrow2); 499 } else if (is_radgrad) { 500 radial_gradient(ureg, src, 501 src_input, src_sampler, 502 coords, const0124, 503 matrow0, matrow1, matrow2); 504 } 505 } else 506 debug_assert(!"Unknown fill type!"); 507 } 508 509 if (has_mask) { 510 mask = ureg_DECL_temporary(ureg); 511 xrender_tex(ureg, mask, mask_pos, mask_sampler, 512 mask_repeat_none); 513 /* src IN mask */ 514 src_in_mask(ureg, out, ureg_src(src), ureg_src(mask), comp_alpha); 515 ureg_release_temporary(ureg, mask); 516 } 517 518 ureg_END(ureg); 519 520 return ureg_create_shader_and_destroy(ureg, pipe); 521} 522 523struct xorg_shaders * xorg_shaders_create(struct xorg_renderer *r) 524{ 525 struct xorg_shaders *sc = CALLOC_STRUCT(xorg_shaders); 526 527 sc->r = r; 528 sc->vs_hash = cso_hash_create(); 529 sc->fs_hash = cso_hash_create(); 530 531 return sc; 532} 533 534static void 535cache_destroy(struct cso_context *cso, 536 struct cso_hash *hash, 537 unsigned processor) 538{ 539 struct cso_hash_iter iter = cso_hash_first_node(hash); 540 while (!cso_hash_iter_is_null(iter)) { 541 void *shader = (void 
*)cso_hash_iter_data(iter); 542 if (processor == PIPE_SHADER_FRAGMENT) { 543 cso_delete_fragment_shader(cso, shader); 544 } else if (processor == PIPE_SHADER_VERTEX) { 545 cso_delete_vertex_shader(cso, shader); 546 } 547 iter = cso_hash_erase(hash, iter); 548 } 549 cso_hash_delete(hash); 550} 551 552void xorg_shaders_destroy(struct xorg_shaders *sc) 553{ 554 cache_destroy(sc->r->cso, sc->vs_hash, 555 PIPE_SHADER_VERTEX); 556 cache_destroy(sc->r->cso, sc->fs_hash, 557 PIPE_SHADER_FRAGMENT); 558 559 free(sc); 560} 561 562static INLINE void * 563shader_from_cache(struct pipe_context *pipe, 564 unsigned type, 565 struct cso_hash *hash, 566 unsigned key) 567{ 568 void *shader = 0; 569 570 struct cso_hash_iter iter = cso_hash_find(hash, key); 571 572 if (cso_hash_iter_is_null(iter)) { 573 if (type == PIPE_SHADER_VERTEX) 574 shader = create_vs(pipe, key); 575 else 576 shader = create_fs(pipe, key); 577 cso_hash_insert(hash, key, shader); 578 } else 579 shader = (void *)cso_hash_iter_data(iter); 580 581 return shader; 582} 583 584struct xorg_shader xorg_shaders_get(struct xorg_shaders *sc, 585 unsigned vs_traits, 586 unsigned fs_traits) 587{ 588 struct xorg_shader shader = { NULL, NULL }; 589 void *vs, *fs; 590 591 vs = shader_from_cache(sc->r->pipe, PIPE_SHADER_VERTEX, 592 sc->vs_hash, vs_traits); 593 fs = shader_from_cache(sc->r->pipe, PIPE_SHADER_FRAGMENT, 594 sc->fs_hash, fs_traits); 595 596 debug_assert(vs && fs); 597 if (!vs || !fs) 598 return shader; 599 600 shader.vs = vs; 601 shader.fs = fs; 602 603 return shader; 604} 605