vl_idct.c revision e87bd8c9578dee384ff03039aa792e1a8dae7f36
1/************************************************************************** 2 * 3 * Copyright 2010 Christian König 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 **************************************************************************/

#include "vl_idct.h"
#include "vl_vertex_buffers.h"
#include "vl_defines.h"
#include "util/u_draw.h"
#include <assert.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <util/u_inlines.h>
#include <util/u_sampler.h>
#include <util/u_format.h>
#include <tgsi/tgsi_ureg.h>
#include "vl_types.h"

#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)

/* Number of color buffers the first (matrix) pass renders to; the
 * intermediate texture is a 3D texture with this many layers. */
#define NR_RENDER_TARGETS 4

/* Vertex shader output slots, shared with the fragment shader inputs below. */
enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_L_ADDR0,
   VS_O_L_ADDR1,
   VS_O_R_ADDR0,
   VS_O_R_ADDR1
};

/* 8x8 DCT basis matrix; uploaded (transposed and scaled) by
 * vl_idct_upload_matrix(). */
static const float const_matrix[8][8] = {
   {  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.353553f,  0.3535530f },
   {  0.4903930f,  0.4157350f,  0.2777850f,  0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   {  0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.191342f,  0.4619400f },
   {  0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.097545f, -0.4157350f },
   {  0.3535530f, -0.3535530f, -0.3535530f,  0.3535540f,  0.3535530f, -0.3535540f, -0.353553f,  0.3535530f },
   {  0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975451f,  0.490393f, -0.2777850f },
   {  0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913410f,  0.4619400f, -0.461940f,  0.1913420f },
   {  0.0975451f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.277786f, -0.0975458f }
};

/*
 * Emit instructions that build a pair of texture fetch addresses
 * (addr[0] and addr[1], the second offset by one texel, i.e. 1.0f/size)
 * for one operand of the matrix multiplication.  Which components hold
 * the row-start coordinate vs. the running texcoord depends on whether
 * this is the left or right operand and on whether it is transposed.
 */
static void
calc_addr(struct ureg_program *shader, struct ureg_dst addr[2],
          struct ureg_src tc, struct ureg_src start, bool right_side,
          bool transposed, float size)
{
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned sw_start = right_side ? TGSI_SWIZZLE_Y : TGSI_SWIZZLE_X;

   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
   unsigned sw_tc = right_side ? TGSI_SWIZZLE_X : TGSI_SWIZZLE_Y;

   /*
    * addr[0..1].(start) = right_side ? start.x : tc.x
    * addr[0..1].(tc) = right_side ? tc.y : start.y
    * addr[0..1].z = tc.z
    * addr[1].(start) += 1.0f / size
    */
   ureg_MOV(shader, ureg_writemask(addr[0], wm_start), ureg_scalar(start, sw_start));
   ureg_MOV(shader, ureg_writemask(addr[0], wm_tc), ureg_scalar(tc, sw_tc));
   ureg_MOV(shader, ureg_writemask(addr[0], TGSI_WRITEMASK_Z), tc);

   ureg_ADD(shader, ureg_writemask(addr[1], wm_start), ureg_scalar(start, sw_start), ureg_imm1f(shader, 1.0f / size));
   ureg_MOV(shader, ureg_writemask(addr[1], wm_tc), ureg_scalar(tc, sw_tc));
   ureg_MOV(shader, ureg_writemask(addr[1], TGSI_WRITEMASK_Z), tc);
}

/*
 * Build the vertex shader shared by both passes.  It positions one quad
 * per 8x8 block, skips (moves off-screen) blocks whose "empty block"
 * flag is set, and computes the left/right operand fetch addresses for
 * the fragment shader.  matrix_stage selects whether the left operand is
 * the source texture (first pass) or the DCT matrix (second pass);
 * color_swizzle selects which component of the eb[] inputs carries the
 * empty-block flag for this color plane.
 */
static void *
create_vert_shader(struct vl_idct *idct, bool matrix_stage, int color_swizzle)
{
   struct ureg_program *shader;
   struct ureg_src vrect, vpos, vblock, eb[4];
   struct ureg_src scale, blocks_xy, t_eb;
   struct ureg_dst t_tex, t_start;
   struct ureg_dst o_vpos, o_l_addr[2], o_r_addr[2];
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   t_tex = ureg_DECL_temporary(shader);
   t_start = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
   /* block index travels in vrect.zw */
   vblock = ureg_swizzle(vrect, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W, TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   eb[0] = ureg_DECL_vs_input(shader, VS_I_EB_0_0);
   eb[1] = ureg_DECL_vs_input(shader, VS_I_EB_1_0);
   eb[2] = ureg_DECL_vs_input(shader, VS_I_EB_0_1);
   eb[3] = ureg_DECL_vs_input(shader, VS_I_EB_1_1);

   o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0);
   o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);

   o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0);
   o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    * blocks_xy = (blocks_x, blocks_y)
    *
    * ar = vblock.y * blocks.x + vblock.x
    * if eb[ar].(color_swizzle)
    *    o_vpos.xy = -1
    * else
    *    t_tex = vpos * blocks_xy + vblock
    *    t_start = t_tex * scale
    *    t_tex = t_tex + vrect
    *    o_vpos.xy = t_tex * scale
    *
    *    o_l_addr = calc_addr(...)
    *    o_r_addr = calc_addr(...)
    * endif
    * o_vpos.zw = vpos
    *
    */

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->buffer_width,
      (float)BLOCK_HEIGHT / idct->buffer_height);

   blocks_xy = ureg_imm2f(shader, idct->blocks_x, idct->blocks_y);

   if (idct->blocks_x > 1 || idct->blocks_y > 1) {
      /* more than one block per instance: index eb[] indirectly with the
       * flattened block number */
      struct ureg_dst ar = ureg_DECL_address(shader);

      ureg_MAD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_X),
               ureg_scalar(vblock, TGSI_SWIZZLE_Y), blocks_xy, vblock);

      ureg_ARL(shader, ureg_writemask(ar, TGSI_WRITEMASK_X), ureg_src(t_tex));
      t_eb = ureg_src_indirect(eb[0], ureg_src(ar));
   } else {
      t_eb = eb[0];
   }

   ureg_IF(shader, ureg_scalar(t_eb, color_swizzle), &label);

      /* empty block: push the quad off-screen so nothing is rasterized */
      ureg_MOV(shader, o_vpos, ureg_imm1f(shader, -1.0f));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ELSE(shader, &label);

      ureg_MAD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, blocks_xy, vblock);
      ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);

      ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), vrect);

      ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);
      /* z selects the render-target layer in the intermediate 3D texture */
      ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_Z),
         ureg_scalar(vrect, TGSI_SWIZZLE_X),
         ureg_imm1f(shader, BLOCK_WIDTH / NR_RENDER_TARGETS));

      ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_tex));

      if(matrix_stage) {
         /* first pass: left = source data, right = DCT matrix (transposed) */
         calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
         calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);
      } else {
         /* second pass: left = DCT matrix, right = intermediate result */
         calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
         calc_addr(shader, o_r_addr, ureg_src(t_tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
      }

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   ureg_release_temporary(shader, t_tex);
   ureg_release_temporary(shader, t_start);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

/*
 * Emit instructions copying an address pair while stepping its texcoord
 * component forward by pos texels (pos / size in normalized coords).
 * Used to walk the four rows/columns fetched per fragment.
 */
static void
increment_addr(struct ureg_program *shader, struct ureg_dst daddr[2],
               struct ureg_src saddr[2], bool right_side, bool transposed,
               int pos, float size)
{
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;

   /*
    * daddr[0..1].(start) = saddr[0..1].(start)
    * daddr[0..1].(tc) = saddr[0..1].(tc) + pos / size
    */

   ureg_MOV(shader, ureg_writemask(daddr[0], wm_start), saddr[0]);
   ureg_ADD(shader, ureg_writemask(daddr[0], wm_tc), saddr[0], ureg_imm1f(shader, pos / size));
   ureg_MOV(shader, ureg_writemask(daddr[1], wm_start), saddr[1]);
   ureg_ADD(shader, ureg_writemask(daddr[1], wm_tc), saddr[1], ureg_imm1f(shader, pos / size));
}

/* Fetch two adjacent texels (eight 16-bit values) from a 3D texture into m[0..1]. */
static void
fetch_four(struct ureg_program *shader, struct ureg_dst m[2], struct ureg_src addr[2], struct ureg_src sampler)
{
   ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, addr[0], sampler);
   ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, addr[1], sampler);
}

/* Emit an 8-element dot product of the l and r operand pairs into dst. */
static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
{
   struct ureg_dst tmp;

   tmp = ureg_DECL_temporary(shader);

   /*
    * tmp.x = dot4(l[0], r[0])
    * tmp.y = dot4(l[1], r[1])
    * dst = tmp.x + tmp.y
    */
   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
   ureg_ADD(shader, dst,
      ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
      ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));

   ureg_release_temporary(shader, tmp);
}

/*
 * Build the first-pass fragment shader: multiplies four rows of the
 * source data (sampler 1) with the DCT matrix (sampler 0), producing one
 * result component per row across NR_RENDER_TARGETS color outputs.
 */
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src l_addr[2], r_addr[2];

   struct ureg_dst l[4][2], r[2];
   struct ureg_dst fragment[NR_RENDER_TARGETS];

   unsigned i, j;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);

   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);

   for (i = 0; i < NR_RENDER_TARGETS; ++i)
      fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);

   for (i = 0; i < 4; ++i) {
      l[i][0] = ureg_DECL_temporary(shader);
      l[i][1] = ureg_DECL_temporary(shader);
   }

   r[0] = ureg_DECL_temporary(shader);
   r[1] = ureg_DECL_temporary(shader);

   /* step the left address down one row for rows 1..3 */
   for (i = 1; i < 4; ++i) {
      increment_addr(shader, l[i], l_addr, false, false, i, idct->buffer_height);
   }

   /* fetch all four left-operand rows (source data, sampler 1) */
   for (i = 0; i < 4; ++i) {
      struct ureg_src s_addr[2];
      s_addr[0] = i == 0 ? l_addr[0] : ureg_src(l[i][0]);
      s_addr[1] = i == 0 ? l_addr[1] : ureg_src(l[i][1]);
      fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 1));
   }

   for (i = 0; i < NR_RENDER_TARGETS; ++i) {
      /* advance the right (matrix, sampler 0) address per render target */
      if(i > 0)
         increment_addr(shader, r, r_addr, true, true, i, BLOCK_HEIGHT);

      struct ureg_src s_addr[2] = { ureg_src(r[0]), ureg_src(r[1]) };
      s_addr[0] = i == 0 ? r_addr[0] : ureg_src(r[0]);
      s_addr[1] = i == 0 ? r_addr[1] : ureg_src(r[1]);
      fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 0));

      /* one dot product per row, packed into fragment[i].x/y/z/w */
      for (j = 0; j < 4; ++j) {
         matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
      }
   }

   for (i = 0; i < 4; ++i) {
      ureg_release_temporary(shader, l[i][0]);
      ureg_release_temporary(shader, l[i][1]);
   }
   ureg_release_temporary(shader, r[0]);
   ureg_release_temporary(shader, r[1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

/*
 * Build the second-pass fragment shader: one dot product of the DCT
 * matrix (sampler 0) with the intermediate result (sampler 1), written
 * to the single-component destination.
 */
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src l_addr[2], r_addr[2];

   struct ureg_dst l[2], r[2];
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);

   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);

   l[0] = ureg_DECL_temporary(shader);
   l[1] = ureg_DECL_temporary(shader);
   r[0] = ureg_DECL_temporary(shader);
   r[1] = ureg_DECL_temporary(shader);

   fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 0));
   fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 1));

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   matrix_mul(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X), l, r);

   ureg_release_temporary(shader, l[0]);
   ureg_release_temporary(shader, l[1]);
   ureg_release_temporary(shader, r[0]);
   ureg_release_temporary(shader, r[1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}
/* Compile the vertex/fragment shader pair for both passes.
 * Returns false if any of the four shaders failed to build. */
static bool
init_shaders(struct vl_idct *idct, int color_swizzle)
{
   idct->matrix_vs = create_vert_shader(idct, true, color_swizzle);
   idct->matrix_fs = create_matrix_frag_shader(idct);

   idct->transpose_vs = create_vert_shader(idct, false, color_swizzle);
   idct->transpose_fs = create_transpose_frag_shader(idct);

   return
      idct->matrix_vs != NULL &&
      idct->matrix_fs != NULL &&
      idct->transpose_vs != NULL &&
      idct->transpose_fs != NULL;
}

/* Destroy the shaders created by init_shaders(). */
static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
   idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
}

/* Create the four (identical) nearest-filter samplers and the
 * point-sprite rasterizer state used for drawing the block quads. */
static bool
init_state(struct vl_idct *idct)
{
   struct pipe_sampler_state sampler;
   struct pipe_rasterizer_state rs_state;
   unsigned i;

   assert(idct);

   for (i = 0; i < 4; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      /*sampler.shadow_ambient = ; */
      /*sampler.lod_bias = ; */
      sampler.min_lod = 0;
      /*sampler.max_lod = ; */
      /*sampler.border_color[0] = ; */
      /*sampler.max_anisotropy = ; */
      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
   }

   memset(&rs_state, 0, sizeof(rs_state));
   /*rs_state.sprite_coord_enable */
   rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
   rs_state.point_quad_rasterization = true;
   rs_state.point_size = BLOCK_WIDTH;
   rs_state.gl_rasterization_rules = false;
   idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);

   return true;
}

/* Destroy the samplers and rasterizer state created by init_state(). */
static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for (i = 0; i < 4; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);

   idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
}

/*
 * Create the per-buffer textures (coefficient source, intermediate 3D
 * texture, 16-bit destination), plus sampler views for the first four.
 * NOTE(review): the template struct is deliberately reused between
 * resource_create() calls, so fields like usage/array_size carry over
 * from the previous texture unless overwritten.
 */
static bool
init_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   struct pipe_resource template;
   struct pipe_sampler_view sampler_view;
   unsigned i;

   assert(idct && buffer);

   /* create textures */
   memset(&template, 0, sizeof(struct pipe_resource));
   template.last_level = 0;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   /* source: CPU-uploaded coefficients, four 16-bit values per texel */
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->buffer_width / 4;
   template.height0 = idct->buffer_height;
   template.depth0 = 1;
   template.array_size = 1;
   template.usage = PIPE_USAGE_STREAM;
   buffer->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   /* intermediate: 3D texture with one layer per first-pass render target */
   template.target = PIPE_TEXTURE_3D;
   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->buffer_width / NR_RENDER_TARGETS;
   template.height0 = idct->buffer_height / 4;
   template.depth0 = NR_RENDER_TARGETS;
   template.usage = PIPE_USAGE_STATIC;
   buffer->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   for (i = 0; i < 4; ++i) {
      if(buffer->textures.all[i] == NULL)
         return false; /* a texture failed to allocate */

      u_sampler_view_default_template(&sampler_view, buffer->textures.all[i], buffer->textures.all[i]->format);
      buffer->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, buffer->textures.all[i], &sampler_view);
   }

   template.target = PIPE_TEXTURE_2D;
   /* TODO: Accomodate HW that can't do this and also for cases when this isn't precise enough */
   template.format = PIPE_FORMAT_R16_SNORM;
   template.width0 = idct->buffer_width;
   template.height0 = idct->buffer_height;
   template.depth0 = 1;

   buffer->destination = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   return true;
}

/* Release the sampler views and texture references of a buffer.
 * NOTE(review): buffer->destination is not released here — presumably
 * ownership passes to the caller of vl_idct_init_buffer(); confirm. */
static void
cleanup_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(idct && buffer);

   for (i = 0; i < 4; ++i) {
      pipe_sampler_view_reference(&buffer->sampler_views.all[i], NULL);
      pipe_resource_reference(&buffer->textures.all[i], NULL);
   }
}

/*
 * Create and fill the texture holding the DCT matrix, transposed and
 * pre-scaled by sqrt(SCALE_FACTOR_16_TO_9) so that applying it twice
 * yields the full 16-to-9 bit scale.  The 8x8 floats are packed as a
 * 2x8 RGBA32F texture (four values per texel).  Caller owns the
 * returned resource.
 */
struct pipe_resource *
vl_idct_upload_matrix(struct pipe_context *pipe)
{
   const float scale = sqrtf(SCALE_FACTOR_16_TO_9);

   struct pipe_resource template, *matrix;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH / 4,
      BLOCK_HEIGHT,
      1
   };

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   template.last_level = 0;
   template.width0 = 2;
   template.height0 = 8;
   template.depth0 = 1;
   template.array_size = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &template);

   /* matrix */
   buf_transfer = pipe->get_transfer
   (
      pipe, matrix,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         // transpose and scale
         f[i * pitch + j] = const_matrix[j][i] * scale;

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   return matrix;
}

/*
 * Initialize an IDCT context: stores the buffer dimensions and
 * per-instance block counts, takes a reference on the shared matrix
 * texture and builds shaders and pipeline state.  Returns false (with
 * everything cleaned up) on failure.
 */
bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
                  unsigned buffer_width, unsigned buffer_height,
                  unsigned blocks_x, unsigned blocks_y,
                  int color_swizzle, struct pipe_resource *matrix)
{
   assert(idct && pipe && matrix);

   idct->pipe = pipe;
   idct->buffer_width = buffer_width;
   idct->buffer_height = buffer_height;
   idct->blocks_x = blocks_x;
   idct->blocks_y = blocks_y;
   pipe_resource_reference(&idct->matrix, matrix);

   if(!init_shaders(idct, color_swizzle))
      return false;

   if(!init_state(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   return true;
}

/* Tear down everything created by vl_idct_init(). */
void
vl_idct_cleanup(struct vl_idct *idct)
{
   cleanup_shaders(idct);
   cleanup_state(idct);

   pipe_resource_reference(&idct->matrix, NULL);
}

/*
 * Initialize a per-stream buffer: textures, render surfaces, viewports
 * and framebuffer state for both passes.  Returns the destination
 * resource on success, NULL on texture allocation failure.
 */
struct pipe_resource *
vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   struct pipe_surface template;

   unsigned i;

   assert(buffer);
   assert(idct);

   /* both passes sample the same matrix texture */
   pipe_resource_reference(&buffer->textures.individual.matrix, idct->matrix);
   pipe_resource_reference(&buffer->textures.individual.transpose, idct->matrix);

   if (!init_textures(idct, buffer))
      return NULL;

   /* init state */
   buffer->viewport[0].scale[0] = buffer->textures.individual.intermediate->width0;
   buffer->viewport[0].scale[1] = buffer->textures.individual.intermediate->height0;

   buffer->viewport[1].scale[0] = buffer->destination->width0;
   buffer->viewport[1].scale[1] = buffer->destination->height0;

   buffer->fb_state[0].width = buffer->textures.individual.intermediate->width0;
   buffer->fb_state[0].height = buffer->textures.individual.intermediate->height0;

   /* first pass renders into one surface per intermediate layer */
   buffer->fb_state[0].nr_cbufs = NR_RENDER_TARGETS;
   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      memset(&template, 0, sizeof(template));
      template.format = buffer->textures.individual.intermediate->format;
      template.u.tex.first_layer = i;
      template.u.tex.last_layer = i;
      template.usage = PIPE_BIND_RENDER_TARGET;
      buffer->fb_state[0].cbufs[i] = idct->pipe->create_surface(
         idct->pipe, buffer->textures.individual.intermediate,
         &template);
   }

   buffer->fb_state[1].width = buffer->destination->width0;
   buffer->fb_state[1].height = buffer->destination->height0;

   buffer->fb_state[1].nr_cbufs = 1;

   memset(&template, 0, sizeof(template));
   template.format = buffer->destination->format;
   template.usage = PIPE_BIND_RENDER_TARGET;
   buffer->fb_state[1].cbufs[0] = idct->pipe->create_surface(
      idct->pipe, buffer->destination, &template);

   for(i = 0; i < 2; ++i) {
      buffer->viewport[i].scale[2] = 1;
      buffer->viewport[i].scale[3] = 1;
      buffer->viewport[i].translate[0] = 0;
      buffer->viewport[i].translate[1] = 0;
      buffer->viewport[i].translate[2] = 0;
      buffer->viewport[i].translate[3] = 0;

      buffer->fb_state[i].zsbuf = NULL;
   }

   return buffer->destination;
}

/* Destroy the surfaces and textures created by vl_idct_init_buffer(). */
void
vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(buffer);

   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      idct->pipe->surface_destroy(idct->pipe, buffer->fb_state[0].cbufs[i]);
   }

   idct->pipe->surface_destroy(idct->pipe, buffer->fb_state[1].cbufs[0]);

   cleanup_textures(idct, buffer);
}

/* Map the source coefficient texture for CPU writes; pair each call
 * with vl_idct_unmap_buffers() before flushing. */
void
vl_idct_map_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   assert(idct);

   struct pipe_box rect =
   {
      0, 0, 0,
      buffer->textures.individual.source->width0,
      buffer->textures.individual.source->height0,
      1
   };

   buffer->tex_transfer = idct->pipe->get_transfer
   (
      idct->pipe, buffer->textures.individual.source,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   buffer->texels = idct->pipe->transfer_map(idct->pipe, buffer->tex_transfer);
}

/*
 * Copy one 8x8 block of 16-bit coefficients into the mapped source
 * texture at block position (x, y).  Requires vl_idct_map_buffers()
 * to have been called first.
 */
void
vl_idct_add_block(struct vl_idct_buffer *buffer, unsigned x, unsigned y, short *block)
{
   unsigned tex_pitch;
   short *texels;

   unsigned i;

   assert(buffer);

   tex_pitch = buffer->tex_transfer->stride / sizeof(short);
   texels = buffer->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;

   for (i = 0; i < BLOCK_HEIGHT; ++i)
      memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));
}

/* Unmap and destroy the transfer created by vl_idct_map_buffers(). */
void
vl_idct_unmap_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   assert(idct && buffer);

   idct->pipe->transfer_unmap(idct->pipe, buffer->tex_transfer);
   idct->pipe->transfer_destroy(idct->pipe, buffer->tex_transfer);
}

/*
 * Run the two-pass IDCT over num_instances instances of the uploaded
 * blocks: pass 1 (matrix shaders) renders source x matrix into the
 * intermediate texture, pass 2 (transpose shaders) renders the final
 * result into the destination.  No-op when num_instances is 0.
 */
void
vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer, unsigned num_instances)
{
   unsigned num_verts;

   assert(idct);
   assert(buffer);

   if(num_instances > 0) {
      /* one quad (4 vertices) per block per instance */
      num_verts = idct->blocks_x * idct->blocks_y * 4;

      idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);

      /* first stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[0]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[0]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
      util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts, 0, num_instances);

      /* second stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[1]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
      util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts, 0, num_instances);
   }
}