/* vl_idct.c — revision 13e28cff7655adec0f89aed9c5ee74f8481133ab */
1/************************************************************************** 2 * 3 * Copyright 2010 Christian König 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
25 * 26 **************************************************************************/ 27 28#include "vl_idct.h" 29#include "util/u_draw.h" 30#include <assert.h> 31#include <pipe/p_context.h> 32#include <pipe/p_screen.h> 33#include <util/u_inlines.h> 34#include <util/u_sampler.h> 35#include <util/u_format.h> 36#include <tgsi/tgsi_ureg.h> 37#include "vl_types.h" 38 39#define BLOCK_WIDTH 8 40#define BLOCK_HEIGHT 8 41 42#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f) 43 44#define STAGE1_SCALE 4.0f 45#define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE) 46 47struct vertex_shader_consts 48{ 49 struct vertex4f norm; 50}; 51 52enum VS_INPUT 53{ 54 VS_I_RECT, 55 VS_I_VPOS, 56 57 NUM_VS_INPUTS 58}; 59 60enum VS_OUTPUT 61{ 62 VS_O_VPOS, 63 VS_O_BLOCK, 64 VS_O_TEX, 65 VS_O_START 66}; 67 68static const float const_matrix[8][8] = { 69 { 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.353553f, 0.3535530f }, 70 { 0.4903930f, 0.4157350f, 0.2777850f, 0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f }, 71 { 0.4619400f, 0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f, 0.191342f, 0.4619400f }, 72 { 0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f, 0.2777850f, 0.4903930f, 0.097545f, -0.4157350f }, 73 { 0.3535530f, -0.3535530f, -0.3535530f, 0.3535540f, 0.3535530f, -0.3535540f, -0.353553f, 0.3535530f }, 74 { 0.2777850f, -0.4903930f, 0.0975452f, 0.4157350f, -0.4157350f, -0.0975451f, 0.490393f, -0.2777850f }, 75 { 0.1913420f, -0.4619400f, 0.4619400f, -0.1913420f, -0.1913410f, 0.4619400f, -0.461940f, 0.1913420f }, 76 { 0.0975451f, -0.2777850f, 0.4157350f, -0.4903930f, 0.4903930f, -0.4157350f, 0.277786f, -0.0975458f } 77}; 78 79/* vertices for a quad covering a block */ 80static const struct vertex2f const_quad[4] = { 81 {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f} 82}; 83 84static void * 85create_vert_shader(struct vl_idct *idct) 86{ 87 struct ureg_program *shader; 88 struct ureg_src scale; 89 struct ureg_src vrect, vpos; 
90 struct ureg_dst t_vpos; 91 struct ureg_dst o_vpos, o_block, o_tex, o_start; 92 93 shader = ureg_create(TGSI_PROCESSOR_VERTEX); 94 if (!shader) 95 return NULL; 96 97 scale = ureg_imm2f(shader, 98 (float)BLOCK_WIDTH / idct->destination->width0, 99 (float)BLOCK_HEIGHT / idct->destination->height0); 100 101 t_vpos = ureg_DECL_temporary(shader); 102 103 vrect = ureg_DECL_vs_input(shader, VS_I_RECT); 104 vpos = ureg_DECL_vs_input(shader, VS_I_VPOS); 105 106 o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS); 107 o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK); 108 o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX); 109 o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START); 110 111 /* 112 * t_vpos = vpos + vrect 113 * o_vpos.xy = t_vpos * scale 114 * o_vpos.zw = vpos 115 * 116 * o_block = vrect 117 * o_tex = t_pos 118 * o_start = vpos * scale 119 * 120 */ 121 ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect); 122 ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), scale); 123 ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos)); 124 ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos); 125 126 ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect); 127 ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos)); 128 ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, scale); 129 130 ureg_release_temporary(shader, t_vpos); 131 132 ureg_END(shader); 133 134 return ureg_create_shader_and_destroy(shader, idct->pipe); 135} 136 137static void 138fetch_one(struct ureg_program *shader, struct ureg_dst m[2], 139 struct ureg_src tc, struct ureg_src sampler, 140 struct ureg_src start, bool right_side, float size) 141{ 142 struct ureg_dst t_tc, tmp; 143 unsigned i, j; 144 145 t_tc = ureg_DECL_temporary(shader); 146 tmp = ureg_DECL_temporary(shader); 147 148 m[0] = 
ureg_DECL_temporary(shader); 149 m[1] = ureg_DECL_temporary(shader); 150 151 /* 152 * t_tc.x = right_side ? start.x : tc.x 153 * t_tc.y = right_side ? tc.y : start.y 154 * m[0..1].xyzw = tex(t_tc++, sampler) 155 */ 156 if(right_side) { 157 ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(tc, TGSI_SWIZZLE_X)); 158 ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(start, TGSI_SWIZZLE_Y)); 159 } else { 160 ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_X)); 161 ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_Y)); 162 } 163 for(i = 0; i < 2; ++i) { 164 for(j = 0; j < 4; ++j) { 165 /* Nouveau and r600g can't writemask tex dst regs (yet?), do in two steps */ 166 ureg_TEX(shader, tmp, TGSI_TEXTURE_2D, ureg_src(t_tc), sampler); 167 ureg_MOV(shader, ureg_writemask(m[i], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X)); 168 169 if(i != 1 || j != 3) /* skip the last add */ 170 ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X << right_side), 171 ureg_src(t_tc), ureg_imm1f(shader, 1.0f / size)); 172 } 173 } 174 175 ureg_release_temporary(shader, t_tc); 176 ureg_release_temporary(shader, tmp); 177} 178 179static void 180fetch_four(struct ureg_program *shader, struct ureg_dst m[2], 181 struct ureg_src tc, struct ureg_src sampler, 182 struct ureg_src start, bool right_side, float size) 183{ 184 struct ureg_dst t_tc; 185 186 t_tc = ureg_DECL_temporary(shader); 187 m[0] = ureg_DECL_temporary(shader); 188 m[1] = ureg_DECL_temporary(shader); 189 190 /* 191 * t_tc.x = right_side ? start.x : tc.x 192 * t_tc.y = right_side ? 
tc.y : start.y 193 * m[0..1] = tex(t_tc++, sampler) 194 */ 195 if(right_side) { 196 ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_Y)); 197 ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_X)); 198 } else { 199 ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_X)); 200 ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_Y)); 201 } 202 203 ureg_TEX(shader, m[0], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler); 204 ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_imm1f(shader, 4.0f / size)); 205 ureg_TEX(shader, m[1], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler); 206 207 ureg_release_temporary(shader, t_tc); 208} 209 210static struct ureg_dst 211matrix_mul(struct ureg_program *shader, struct ureg_dst m[2][2]) 212{ 213 struct ureg_dst dst, tmp[2]; 214 unsigned i; 215 216 dst = ureg_DECL_temporary(shader); 217 for(i = 0; i < 2; ++i) { 218 tmp[i] = ureg_DECL_temporary(shader); 219 } 220 221 /* 222 * tmp[0..1] = dot4(m[0][0..1], m[1][0..1]) 223 * dst = tmp[0] + tmp[1] 224 */ 225 ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(m[0][0]), ureg_src(m[1][0])); 226 ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(m[0][1]), ureg_src(m[1][1])); 227 ureg_ADD(shader, ureg_writemask(dst, TGSI_WRITEMASK_X), ureg_src(tmp[0]), ureg_src(tmp[1])); 228 229 for(i = 0; i < 2; ++i) { 230 ureg_release_temporary(shader, tmp[i]); 231 } 232 233 return dst; 234} 235 236static void * 237create_transpose_frag_shader(struct vl_idct *idct) 238{ 239 struct ureg_program *shader; 240 241 struct ureg_src tc[2], sampler[2]; 242 struct ureg_src start[2]; 243 244 struct ureg_dst m[2][2]; 245 struct ureg_dst tmp, fragment; 246 247 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT); 248 if (!shader) 249 return NULL; 250 251 tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR); 252 
tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR); 253 254 sampler[0] = ureg_DECL_sampler(shader, 0); 255 sampler[1] = ureg_DECL_sampler(shader, 1); 256 257 start[0] = ureg_imm1f(shader, 0.0f); 258 start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT); 259 260 fetch_four(shader, m[0], tc[0], sampler[0], start[0], false, BLOCK_WIDTH); 261 fetch_one(shader, m[1], tc[1], sampler[1], start[1], true, idct->destination->height0); 262 263 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0); 264 265 tmp = matrix_mul(shader, m); 266 ureg_MUL(shader, fragment, ureg_src(tmp), ureg_imm1f(shader, STAGE2_SCALE)); 267 268 ureg_release_temporary(shader, tmp); 269 ureg_release_temporary(shader, m[0][0]); 270 ureg_release_temporary(shader, m[0][1]); 271 ureg_release_temporary(shader, m[1][0]); 272 ureg_release_temporary(shader, m[1][1]); 273 274 ureg_END(shader); 275 276 return ureg_create_shader_and_destroy(shader, idct->pipe); 277} 278 279static void * 280create_matrix_frag_shader(struct vl_idct *idct) 281{ 282 struct ureg_program *shader; 283 284 struct ureg_src tc[2], sampler[2]; 285 struct ureg_src start[2]; 286 287 struct ureg_dst m[2][2]; 288 struct ureg_dst tmp, fragment; 289 290 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT); 291 if (!shader) 292 return NULL; 293 294 tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR); 295 tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR); 296 297 sampler[0] = ureg_DECL_sampler(shader, 1); 298 sampler[1] = ureg_DECL_sampler(shader, 0); 299 300 start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT); 301 start[1] = ureg_imm1f(shader, 0.0f); 302 303 fetch_four(shader, m[0], tc[0], sampler[0], start[0], false, idct->destination->width0); 304 fetch_four(shader, m[1], tc[1], sampler[1], start[1], true, BLOCK_HEIGHT); 305 
306 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0); 307 308 tmp = matrix_mul(shader, m); 309 ureg_MUL(shader, fragment, ureg_src(tmp), ureg_imm1f(shader, STAGE1_SCALE)); 310 311 ureg_release_temporary(shader, tmp); 312 ureg_release_temporary(shader, m[0][0]); 313 ureg_release_temporary(shader, m[0][1]); 314 ureg_release_temporary(shader, m[1][0]); 315 ureg_release_temporary(shader, m[1][1]); 316 317 ureg_END(shader); 318 319 return ureg_create_shader_and_destroy(shader, idct->pipe); 320} 321 322static void * 323create_empty_block_frag_shader(struct vl_idct *idct) 324{ 325 struct ureg_program *shader; 326 struct ureg_dst fragment; 327 328 shader = ureg_create(TGSI_PROCESSOR_FRAGMENT); 329 if (!shader) 330 return NULL; 331 332 fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0); 333 334 ureg_MOV(shader, fragment, ureg_imm1f(shader, 0.0f)); 335 336 ureg_END(shader); 337 338 return ureg_create_shader_and_destroy(shader, idct->pipe); 339} 340 341static void 342xfer_buffers_map(struct vl_idct *idct) 343{ 344 struct pipe_box rect = 345 { 346 0, 0, 0, 347 idct->destination->width0, 348 idct->destination->height0, 349 1 350 }; 351 352 idct->tex_transfer = idct->pipe->get_transfer 353 ( 354 idct->pipe, idct->textures.individual.source, 355 u_subresource(0, 0), 356 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD, 357 &rect 358 ); 359 360 idct->texels = idct->pipe->transfer_map(idct->pipe, idct->tex_transfer); 361 362 idct->vectors = pipe_buffer_map 363 ( 364 idct->pipe, 365 idct->vertex_bufs.individual.pos.buffer, 366 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD, 367 &idct->vec_transfer 368 ); 369} 370 371static void 372xfer_buffers_unmap(struct vl_idct *idct) 373{ 374 pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.pos.buffer, idct->vec_transfer); 375 376 idct->pipe->transfer_unmap(idct->pipe, idct->tex_transfer); 377 idct->pipe->transfer_destroy(idct->pipe, idct->tex_transfer); 378} 379 380static bool 381init_shaders(struct vl_idct *idct) 382{ 383 
idct->vs = create_vert_shader(idct); 384 idct->transpose_fs = create_transpose_frag_shader(idct); 385 idct->matrix_fs = create_matrix_frag_shader(idct); 386 idct->eb_fs = create_empty_block_frag_shader(idct); 387 388 return 389 idct->vs != NULL && 390 idct->transpose_fs != NULL && 391 idct->matrix_fs != NULL && 392 idct->eb_fs != NULL; 393} 394 395static void 396cleanup_shaders(struct vl_idct *idct) 397{ 398 idct->pipe->delete_vs_state(idct->pipe, idct->vs); 399 idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs); 400 idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs); 401 idct->pipe->delete_fs_state(idct->pipe, idct->eb_fs); 402} 403 404static bool 405init_buffers(struct vl_idct *idct) 406{ 407 struct pipe_resource template; 408 struct pipe_sampler_view sampler_view; 409 struct pipe_vertex_element vertex_elems[2]; 410 unsigned i; 411 412 idct->max_blocks = 413 align(idct->destination->width0, BLOCK_WIDTH) / BLOCK_WIDTH * 414 align(idct->destination->height0, BLOCK_HEIGHT) / BLOCK_HEIGHT * 415 idct->destination->depth0; 416 417 memset(&template, 0, sizeof(struct pipe_resource)); 418 template.target = PIPE_TEXTURE_2D; 419 template.format = PIPE_FORMAT_R32G32B32A32_FLOAT; 420 template.last_level = 0; 421 template.width0 = 2; 422 template.height0 = 8; 423 template.depth0 = 1; 424 template.usage = PIPE_USAGE_IMMUTABLE; 425 template.bind = PIPE_BIND_SAMPLER_VIEW; 426 template.flags = 0; 427 428 template.format = PIPE_FORMAT_R16G16B16A16_SNORM; 429 template.width0 = idct->destination->width0; 430 template.height0 = idct->destination->height0; 431 template.depth0 = idct->destination->depth0; 432 template.usage = PIPE_USAGE_STREAM; 433 idct->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template); 434 435 template.format = idct->destination->format; 436 template.usage = PIPE_USAGE_STATIC; 437 idct->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template); 438 439 for (i = 0; 
i < 4; ++i) { 440 if(idct->textures.all[i] == NULL) 441 return false; /* a texture failed to allocate */ 442 443 u_sampler_view_default_template(&sampler_view, idct->textures.all[i], idct->textures.all[i]->format); 444 idct->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, idct->textures.all[i], &sampler_view); 445 } 446 447 idct->vertex_bufs.individual.quad.stride = sizeof(struct vertex2f); 448 idct->vertex_bufs.individual.quad.max_index = 4 * idct->max_blocks - 1; 449 idct->vertex_bufs.individual.quad.buffer_offset = 0; 450 idct->vertex_bufs.individual.quad.buffer = pipe_buffer_create 451 ( 452 idct->pipe->screen, 453 PIPE_BIND_VERTEX_BUFFER, 454 sizeof(struct vertex2f) * 4 * idct->max_blocks 455 ); 456 457 if(idct->vertex_bufs.individual.quad.buffer == NULL) 458 return false; 459 460 idct->vertex_bufs.individual.pos.stride = sizeof(struct vertex2f); 461 idct->vertex_bufs.individual.pos.max_index = 4 * idct->max_blocks - 1; 462 idct->vertex_bufs.individual.pos.buffer_offset = 0; 463 idct->vertex_bufs.individual.pos.buffer = pipe_buffer_create 464 ( 465 idct->pipe->screen, 466 PIPE_BIND_VERTEX_BUFFER, 467 sizeof(struct vertex2f) * 4 * idct->max_blocks 468 ); 469 470 if(idct->vertex_bufs.individual.pos.buffer == NULL) 471 return false; 472 473 /* Rect element */ 474 vertex_elems[0].src_offset = 0; 475 vertex_elems[0].instance_divisor = 0; 476 vertex_elems[0].vertex_buffer_index = 0; 477 vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT; 478 479 /* Pos element */ 480 vertex_elems[1].src_offset = 0; 481 vertex_elems[1].instance_divisor = 0; 482 vertex_elems[1].vertex_buffer_index = 1; 483 vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT; 484 485 idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems); 486 487 return true; 488} 489 490static void 491cleanup_buffers(struct vl_idct *idct) 492{ 493 unsigned i; 494 495 assert(idct); 496 497 for (i = 0; i < 4; ++i) { 498 
pipe_sampler_view_reference(&idct->sampler_views.all[i], NULL); 499 pipe_resource_reference(&idct->textures.all[i], NULL); 500 } 501 502 idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state); 503 pipe_resource_reference(&idct->vertex_bufs.individual.quad.buffer, NULL); 504 pipe_resource_reference(&idct->vertex_bufs.individual.pos.buffer, NULL); 505} 506 507static void 508init_constants(struct vl_idct *idct) 509{ 510 struct pipe_transfer *buf_transfer; 511 struct vertex2f *v; 512 513 unsigned i; 514 515 /* quad vectors */ 516 v = pipe_buffer_map 517 ( 518 idct->pipe, 519 idct->vertex_bufs.individual.quad.buffer, 520 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD, 521 &buf_transfer 522 ); 523 for ( i = 0; i < idct->max_blocks; ++i) 524 memcpy(v + i * 4, &const_quad, sizeof(const_quad)); 525 pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.quad.buffer, buf_transfer); 526} 527 528static void 529init_state(struct vl_idct *idct) 530{ 531 struct pipe_sampler_state sampler; 532 unsigned i; 533 534 idct->num_blocks = 0; 535 idct->num_empty_blocks = 0; 536 537 idct->viewport.scale[0] = idct->destination->width0; 538 idct->viewport.scale[1] = idct->destination->height0; 539 idct->viewport.scale[2] = 1; 540 idct->viewport.scale[3] = 1; 541 idct->viewport.translate[0] = 0; 542 idct->viewport.translate[1] = 0; 543 idct->viewport.translate[2] = 0; 544 idct->viewport.translate[3] = 0; 545 546 idct->fb_state.width = idct->destination->width0; 547 idct->fb_state.height = idct->destination->height0; 548 idct->fb_state.nr_cbufs = 1; 549 idct->fb_state.zsbuf = NULL; 550 551 for (i = 0; i < 4; ++i) { 552 memset(&sampler, 0, sizeof(sampler)); 553 sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE; 554 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE; 555 sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE; 556 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST; 557 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE; 558 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST; 559 
sampler.compare_mode = PIPE_TEX_COMPARE_NONE; 560 sampler.compare_func = PIPE_FUNC_ALWAYS; 561 sampler.normalized_coords = 1; 562 /*sampler.shadow_ambient = ; */ 563 /*sampler.lod_bias = ; */ 564 sampler.min_lod = 0; 565 /*sampler.max_lod = ; */ 566 /*sampler.border_color[0] = ; */ 567 /*sampler.max_anisotropy = ; */ 568 idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler); 569 } 570} 571 572static void 573cleanup_state(struct vl_idct *idct) 574{ 575 unsigned i; 576 577 for (i = 0; i < 4; ++i) 578 idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]); 579} 580 581struct pipe_resource * 582vl_idct_upload_matrix(struct pipe_context *pipe) 583{ 584 struct pipe_resource template, *matrix; 585 struct pipe_transfer *buf_transfer; 586 unsigned i, j, pitch; 587 float *f; 588 589 struct pipe_box rect = 590 { 591 0, 0, 0, 592 BLOCK_WIDTH, 593 BLOCK_HEIGHT, 594 1 595 }; 596 597 memset(&template, 0, sizeof(struct pipe_resource)); 598 template.target = PIPE_TEXTURE_2D; 599 template.format = PIPE_FORMAT_R32G32B32A32_FLOAT; 600 template.last_level = 0; 601 template.width0 = 2; 602 template.height0 = 8; 603 template.depth0 = 1; 604 template.usage = PIPE_USAGE_IMMUTABLE; 605 template.bind = PIPE_BIND_SAMPLER_VIEW; 606 template.flags = 0; 607 608 matrix = pipe->screen->resource_create(pipe->screen, &template); 609 610 /* matrix */ 611 buf_transfer = pipe->get_transfer 612 ( 613 pipe, matrix, 614 u_subresource(0, 0), 615 PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD, 616 &rect 617 ); 618 pitch = buf_transfer->stride / util_format_get_blocksize(buf_transfer->resource->format); 619 620 f = pipe->transfer_map(pipe, buf_transfer); 621 for(i = 0; i < BLOCK_HEIGHT; ++i) 622 for(j = 0; j < BLOCK_WIDTH; ++j) 623 f[i * pitch * 4 + j] = const_matrix[j][i]; // transpose 624 625 pipe->transfer_unmap(pipe, buf_transfer); 626 pipe->transfer_destroy(pipe, buf_transfer); 627 628 return matrix; 629} 630 631bool 632vl_idct_init(struct vl_idct *idct, struct 
pipe_context *pipe, struct pipe_resource *dst, struct pipe_resource *matrix) 633{ 634 assert(idct && pipe && dst); 635 636 idct->pipe = pipe; 637 pipe_resource_reference(&idct->textures.individual.matrix, matrix); 638 pipe_resource_reference(&idct->textures.individual.transpose, matrix); 639 pipe_resource_reference(&idct->destination, dst); 640 641 init_state(idct); 642 643 if(!init_shaders(idct)) 644 return false; 645 646 if(!init_buffers(idct)) { 647 cleanup_shaders(idct); 648 return false; 649 } 650 651 idct->surfaces.intermediate = idct->pipe->screen->get_tex_surface( 652 idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, 0, 653 PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET); 654 655 idct->surfaces.destination = idct->pipe->screen->get_tex_surface( 656 idct->pipe->screen, idct->destination, 0, 0, 0, 657 PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET); 658 659 init_constants(idct); 660 xfer_buffers_map(idct); 661 662 return true; 663} 664 665void 666vl_idct_cleanup(struct vl_idct *idct) 667{ 668 idct->pipe->screen->tex_surface_destroy(idct->surfaces.destination); 669 idct->pipe->screen->tex_surface_destroy(idct->surfaces.intermediate); 670 671 cleanup_shaders(idct); 672 cleanup_buffers(idct); 673 674 cleanup_state(idct); 675 676 pipe_resource_reference(&idct->destination, NULL); 677} 678 679void 680vl_idct_add_block(struct vl_idct *idct, unsigned x, unsigned y, short *block) 681{ 682 struct vertex2f v, *v_dst; 683 684 unsigned tex_pitch; 685 short *texels; 686 687 unsigned i; 688 689 assert(idct); 690 691 if(block) { 692 tex_pitch = idct->tex_transfer->stride / util_format_get_blocksize(idct->tex_transfer->resource->format); 693 texels = idct->texels + (y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH) * 4; 694 695 for (i = 0; i < BLOCK_HEIGHT; ++i) 696 memcpy(texels + i * tex_pitch * 4, block + i * BLOCK_WIDTH, BLOCK_WIDTH * 2); 697 698 /* non empty blocks fills the vector buffer from left to right */ 699 v_dst = idct->vectors + 
idct->num_blocks * 4; 700 701 idct->num_blocks++; 702 703 } else { 704 705 /* while empty blocks fills the vector buffer from right to left */ 706 v_dst = idct->vectors + (idct->max_blocks - idct->num_empty_blocks) * 4 - 4; 707 708 idct->num_empty_blocks++; 709 } 710 711 v.x = x; 712 v.y = y; 713 714 for (i = 0; i < 4; ++i) { 715 v_dst[i] = v; 716 } 717} 718 719void 720vl_idct_flush(struct vl_idct *idct) 721{ 722 xfer_buffers_unmap(idct); 723 724 if(idct->num_blocks > 0) { 725 726 /* first stage */ 727 idct->fb_state.cbufs[0] = idct->surfaces.intermediate; 728 idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state); 729 idct->pipe->set_viewport_state(idct->pipe, &idct->viewport); 730 731 idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all); 732 idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state); 733 idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[0]); 734 idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]); 735 idct->pipe->bind_vs_state(idct->pipe, idct->vs); 736 idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs); 737 738 util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4); 739 740 /* second stage */ 741 idct->fb_state.cbufs[0] = idct->surfaces.destination; 742 idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state); 743 idct->pipe->set_viewport_state(idct->pipe, &idct->viewport); 744 745 idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all); 746 idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state); 747 idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[1]); 748 idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]); 749 idct->pipe->bind_vs_state(idct->pipe, idct->vs); 750 idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs); 751 752 util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4); 753 } 754 755 
if(idct->num_empty_blocks > 0) { 756 757 /* empty block handling */ 758 idct->fb_state.cbufs[0] = idct->surfaces.destination; 759 idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state); 760 idct->pipe->set_viewport_state(idct->pipe, &idct->viewport); 761 762 idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all); 763 idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state); 764 idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all); 765 idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all); 766 idct->pipe->bind_vs_state(idct->pipe, idct->vs); 767 idct->pipe->bind_fs_state(idct->pipe, idct->eb_fs); 768 769 util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 770 (idct->max_blocks - idct->num_empty_blocks) * 4, 771 idct->num_empty_blocks * 4); 772 } 773 774 idct->num_blocks = 0; 775 idct->num_empty_blocks = 0; 776 xfer_buffers_map(idct); 777} 778