/* vl_idct.c, revision 0b749d6dcb537472771d6fe6e454aafc916ab3fe */
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vl_idct.h"
#include "vl_vertex_buffers.h"
#include "util/u_draw.h"
#include <assert.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <util/u_inlines.h>
#include <util/u_sampler.h>
#include <util/u_format.h>
#include <tgsi/tgsi_ureg.h>
#include "vl_types.h"

#define BLOCK_WIDTH 8
#define BLOCK_HEIGHT 8

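/*
 * The IDCT is done in two render passes; each pass applies part of the
 * overall scale so that the intermediate values fit into a SNORM texture.
 * STAGE1_SCALE * STAGE2_SCALE == SCALE_FACTOR_16_TO_9, which maps the
 * 16 bit coefficient range down to the 9 bit result range.
 */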
#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)

#define STAGE1_SCALE 4.0f
#define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE)

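/*
 * Number of color buffers written by the first pass; the intermediate
 * texture gets one layer per render target.
 */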
#define NR_RENDER_TARGETS 1

enum VS_INPUT
{
   VS_I_RECT,
   VS_I_VPOS,

   NUM_VS_INPUTS
};

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_BLOCK,
   VS_O_TEX,
   VS_O_START
};

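/*
 * 8x8 DCT-II basis: const_matrix[i][j] = c(i) * cos((2j + 1) * i * PI / 16)
 * with c(0) = 1/sqrt(8) and c(i) = 1/2 otherwise. vl_idct_upload_matrix()
 * uploads it transposed.
 */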
static const float const_matrix[8][8] = {
   {  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f },
   {  0.4903930f,  0.4157350f,  0.2777850f,  0.0975452f, -0.0975452f, -0.2777850f, -0.4157350f, -0.4903930f },
   {  0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.1913420f,  0.4619400f },
   {  0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.0975452f, -0.4157350f },
   {  0.3535530f, -0.3535530f, -0.3535530f,  0.3535530f,  0.3535530f, -0.3535530f, -0.3535530f,  0.3535530f },
   {  0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975452f,  0.4903930f, -0.2777850f },
   {  0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913420f,  0.4619400f, -0.4619400f,  0.1913420f },
   {  0.0975452f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.2777850f, -0.0975452f }
};

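/*
 * Vertex shader shared by both passes: combines the per-block position
 * (VS_I_VPOS) with the quad corner (VS_I_RECT) and emits the position,
 * block, tex and start coordinates consumed by the fragment shaders.
 */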
static void *
create_vert_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src scale;
   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos, o_block, o_tex, o_start;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   t_vpos = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
   o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
   o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_vpos = vpos + vrect
    * o_vpos.xy = t_vpos * scale
    * o_vpos.zw = vpos
    *
    * o_block = vrect
    * o_tex = t_vpos
    * o_start = vpos * scale
    *
    */
   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->buffer_width,
      (float)BLOCK_HEIGHT / idct->buffer_height);

   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
   ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, scale);

   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

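/*
 * Fetch eight consecutive values as two 4-component texels into m[0..1].
 * start and block give the starting coordinate, right_side and transposed
 * select the fetch direction, size is the texture size (in texels) along
 * that direction.
 */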
static void
fetch_four(struct ureg_program *shader, struct ureg_dst m[2],
           struct ureg_src tc, struct ureg_src sampler,
           struct ureg_src start, struct ureg_src block,
           bool right_side, bool transposed, float size)
{
   struct ureg_dst t_tc;
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;

   t_tc = ureg_DECL_temporary(shader);
   m[0] = ureg_DECL_temporary(shader);
   m[1] = ureg_DECL_temporary(shader);

   /*
    * t_tc[wm_start] = right_side ? start.y : start.x
    * t_tc[wm_tc]    = right_side ? tc.x    : tc.y
    * m[0..1] = tex(t_tc++, sampler)
    */
   if(!right_side) {
      ureg_MOV(shader, ureg_writemask(t_tc, wm_start), ureg_scalar(start, TGSI_SWIZZLE_X));
      ureg_MOV(shader, ureg_writemask(t_tc, wm_tc), ureg_scalar(tc, TGSI_SWIZZLE_Y));
   } else {
      ureg_MOV(shader, ureg_writemask(t_tc, wm_start), ureg_scalar(start, TGSI_SWIZZLE_Y));
      ureg_MOV(shader, ureg_writemask(t_tc, wm_tc), ureg_scalar(tc, TGSI_SWIZZLE_X));
   }

#if NR_RENDER_TARGETS == 8
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(block, TGSI_SWIZZLE_X));
#else
   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_imm1f(shader, 0.0f));
#endif

   ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, ureg_src(t_tc), sampler);
   ureg_ADD(shader, ureg_writemask(t_tc, wm_start), ureg_src(t_tc), ureg_imm1f(shader, 1.0f / size));
   ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, ureg_src(t_tc), sampler);

   ureg_release_temporary(shader, t_tc);
}

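/*
 * Dot product of two eight-value vectors fetched by fetch_four(): two DP4s
 * plus an ADD.
 */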
static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
{
   struct ureg_dst tmp[2];
   unsigned i;

   for(i = 0; i < 2; ++i) {
      tmp[i] = ureg_DECL_temporary(shader);
   }

   /*
    * tmp[0..1] = dot4(l[0..1], r[0..1])
    * dst = tmp[0] + tmp[1]
    */
   ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
   ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(l[1]), ureg_src(r[1]));
   ureg_ADD(shader, dst,
      ureg_scalar(ureg_src(tmp[0]), TGSI_SWIZZLE_X),
      ureg_scalar(ureg_src(tmp[1]), TGSI_SWIZZLE_X));

   for(i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, tmp[i]);
   }
}

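/*
 * Fragment shader for the second pass: multiplies the matrix with a column
 * of the intermediate texture and writes the result, scaled by
 * STAGE2_SCALE, to the destination surface.
 */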
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src block, tex, sampler[2];
   struct ureg_src start[2];

   struct ureg_dst l[2], r[2];
   struct ureg_dst tmp, fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   block = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
   tex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_CONSTANT);

   sampler[0] = ureg_DECL_sampler(shader, 0);
   sampler[1] = ureg_DECL_sampler(shader, 1);

   start[0] = ureg_imm1f(shader, 0.0f);
   start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);

   fetch_four(shader, l, block, sampler[0], start[0], block, false, false, BLOCK_WIDTH / 4);
   fetch_four(shader, r, tex, sampler[1], start[1], block, true, false, idct->buffer_height / 4);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   tmp = ureg_DECL_temporary(shader);
   matrix_mul(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), l, r);
   ureg_MUL(shader, fragment, ureg_src(tmp), ureg_imm1f(shader, STAGE2_SCALE));

   ureg_release_temporary(shader, tmp);
   ureg_release_temporary(shader, l[0]);
   ureg_release_temporary(shader, l[1]);
   ureg_release_temporary(shader, r[0]);
   ureg_release_temporary(shader, r[1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

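/*
 * Fragment shader for the first pass: fetches four rows of eight source
 * coefficients, scales them by STAGE1_SCALE and writes their products with
 * the matrix into the intermediate render target(s), four dot products
 * packed per output texel.
 */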
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src tex, block, sampler[2];
   struct ureg_src start[2];

   struct ureg_dst l[4][2], r[2];
   struct ureg_dst t_tc, tmp, fragment[NR_RENDER_TARGETS];

   unsigned i, j;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   t_tc = ureg_DECL_temporary(shader);
   tmp = ureg_DECL_temporary(shader);

   tex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
   block = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);

   sampler[0] = ureg_DECL_sampler(shader, 1);
   sampler[1] = ureg_DECL_sampler(shader, 0);

   start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
   start[1] = ureg_imm1f(shader, 0.0f);

   for (i = 0; i < NR_RENDER_TARGETS; ++i)
      fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);

   ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), tex);
   for (i = 0; i < 4; ++i) {
      fetch_four(shader, l[i], ureg_src(t_tc), sampler[0], start[0], block, false, false, idct->buffer_width / 4);
      ureg_MUL(shader, l[i][0], ureg_src(l[i][0]), ureg_imm1f(shader, STAGE1_SCALE));
      ureg_MUL(shader, l[i][1], ureg_src(l[i][1]), ureg_imm1f(shader, STAGE1_SCALE));
      if(i != 3)
         ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y),
            ureg_src(t_tc), ureg_imm1f(shader, 1.0f / idct->buffer_height));
   }

   for (i = 0; i < NR_RENDER_TARGETS; ++i) {

#if NR_RENDER_TARGETS == 8
      ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_imm1f(shader, 1.0f / BLOCK_WIDTH * i));
      fetch_four(shader, r, ureg_src(t_tc), sampler[1], start[1], block, true, true, BLOCK_WIDTH / 4);
#elif NR_RENDER_TARGETS == 1
      fetch_four(shader, r, block, sampler[1], start[1], block, true, true, BLOCK_WIDTH / 4);
#else
#error invalid number of render targets
#endif

      for (j = 0; j < 4; ++j) {
         matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
      }
      ureg_release_temporary(shader, r[0]);
      ureg_release_temporary(shader, r[1]);
   }

   ureg_release_temporary(shader, t_tc);
   ureg_release_temporary(shader, tmp);

   for (i = 0; i < 4; ++i) {
      ureg_release_temporary(shader, l[i][0]);
      ureg_release_temporary(shader, l[i][1]);
   }

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

static bool
init_shaders(struct vl_idct *idct)
{
   idct->vs = create_vert_shader(idct);
   idct->matrix_fs = create_matrix_frag_shader(idct);
   idct->transpose_fs = create_transpose_frag_shader(idct);

   return
      idct->vs != NULL &&
      idct->transpose_fs != NULL &&
      idct->matrix_fs != NULL;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
}

static bool
init_state(struct vl_idct *idct)
{
   struct pipe_vertex_element vertex_elems[NUM_VS_INPUTS];
   struct pipe_sampler_state sampler;
   struct pipe_rasterizer_state rs_state;
   unsigned i;

   assert(idct);

   idct->quad = vl_vb_upload_quads(idct->pipe, idct->max_blocks);

   if(idct->quad.buffer == NULL)
      return false;

   for (i = 0; i < 4; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      /*sampler.shadow_ambient = ; */
      /*sampler.lod_bias = ; */
      sampler.min_lod = 0;
      /*sampler.max_lod = ; */
      /*sampler.border_color[0] = ; */
      /*sampler.max_anisotropy = ; */
      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
   }

   memset(&rs_state, 0, sizeof(rs_state));
   /*rs_state.sprite_coord_enable */
   rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
   rs_state.point_quad_rasterization = true;
   rs_state.point_size = BLOCK_WIDTH;
   rs_state.gl_rasterization_rules = false;
   idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);

   vertex_elems[VS_I_RECT] = vl_vb_get_quad_vertex_element();

   /* Pos element */
   vertex_elems[VS_I_VPOS].src_format = PIPE_FORMAT_R32G32_FLOAT;

   idct->vertex_buffer_stride = vl_vb_element_helper(&vertex_elems[VS_I_VPOS], 1, 1);
   idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);

   return true;
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for (i = 0; i < 4; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);

   idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
   idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
}

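/*
 * Create the per-buffer textures: the source texture that receives the
 * uploaded coefficients and the intermediate texture written by the first
 * pass, plus a sampler view for each of the four textures.
 */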
static bool
init_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   struct pipe_resource template;
   struct pipe_sampler_view sampler_view;
   unsigned i;

   assert(idct && buffer);

   /* create textures */
   memset(&template, 0, sizeof(struct pipe_resource));
   template.last_level = 0;
   template.depth0 = 1;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->buffer_width / 4;
   template.height0 = idct->buffer_height;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_STREAM;
   buffer->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   template.target = PIPE_TEXTURE_3D;
   template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
   template.width0 = idct->buffer_width / NR_RENDER_TARGETS;
   template.height0 = idct->buffer_height / 4;
   template.depth0 = NR_RENDER_TARGETS;
   template.usage = PIPE_USAGE_STATIC;
   buffer->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   for (i = 0; i < 4; ++i) {
      if(buffer->textures.all[i] == NULL)
         return false; /* a texture failed to allocate */

      u_sampler_view_default_template(&sampler_view, buffer->textures.all[i], buffer->textures.all[i]->format);
      buffer->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, buffer->textures.all[i], &sampler_view);
   }

   return true;
}

static void
cleanup_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(idct && buffer);

   for (i = 0; i < 4; ++i) {
      pipe_sampler_view_reference(&buffer->sampler_views.all[i], NULL);
      pipe_resource_reference(&buffer->textures.all[i], NULL);
   }
}

static bool
init_vertex_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   assert(idct && buffer);

   buffer->vertex_bufs.individual.quad.stride = idct->quad.stride;
   buffer->vertex_bufs.individual.quad.max_index = idct->quad.max_index;
   buffer->vertex_bufs.individual.quad.buffer_offset = idct->quad.buffer_offset;
   pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, idct->quad.buffer);

   buffer->vertex_bufs.individual.pos = vl_vb_create_buffer(idct->pipe, idct->max_blocks, idct->vertex_buffer_stride);

   if(buffer->vertex_bufs.individual.pos.buffer == NULL)
      return false;

   if (!vl_vb_init(&buffer->blocks, idct->max_blocks, 2))
      return false;

   return true;
}

static void
cleanup_vertex_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   assert(idct && buffer);

   pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, NULL);
   pipe_resource_reference(&buffer->vertex_bufs.individual.pos.buffer, NULL);

   vl_vb_cleanup(&buffer->blocks);
}

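/*
 * Upload const_matrix into a 2x8 RGBA32F texture, four floats per texel,
 * transposed so that texture row i holds column i of const_matrix.
 */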
struct pipe_resource *
vl_idct_upload_matrix(struct pipe_context *pipe)
{
   struct pipe_resource template, *matrix;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH,
      BLOCK_HEIGHT,
      1
   };

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   template.last_level = 0;
   template.width0 = 2;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &template);

   /* matrix */
   buf_transfer = pipe->get_transfer
   (
      pipe, matrix,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch + j] = const_matrix[j][i]; // transpose

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   return matrix;
}

bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
                  unsigned buffer_width, unsigned buffer_height,
                  struct pipe_resource *matrix)
{
   assert(idct && pipe && matrix);

   idct->pipe = pipe;
   idct->buffer_width = buffer_width;
   idct->buffer_height = buffer_height;
   pipe_resource_reference(&idct->matrix, matrix);

   idct->max_blocks =
      align(buffer_width, BLOCK_WIDTH) / BLOCK_WIDTH *
      align(buffer_height, BLOCK_HEIGHT) / BLOCK_HEIGHT;

   if(!init_shaders(idct))
      return false;

   if(!init_state(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   cleanup_shaders(idct);
   cleanup_state(idct);

   pipe_resource_reference(&idct->matrix, NULL);
}

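/*
 * Set up the per-buffer state: textures, sampler views, vertex buffers and
 * the framebuffer/viewport pair for each of the two render passes
 * (intermediate texture first, then the caller supplied destination).
 */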
bool
vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer, struct pipe_resource *dst)
{
   unsigned i;

   assert(buffer);
   assert(idct);
   assert(dst);

   pipe_resource_reference(&buffer->textures.individual.matrix, idct->matrix);
   pipe_resource_reference(&buffer->textures.individual.transpose, idct->matrix);
   pipe_resource_reference(&buffer->destination, dst);

   if (!init_textures(idct, buffer))
      return false;

   if (!init_vertex_buffers(idct, buffer))
      return false;

   /* init state */
   buffer->viewport[0].scale[0] = buffer->textures.individual.intermediate->width0;
   buffer->viewport[0].scale[1] = buffer->textures.individual.intermediate->height0;

   buffer->viewport[1].scale[0] = buffer->destination->width0;
   buffer->viewport[1].scale[1] = buffer->destination->height0;

   buffer->fb_state[0].width = buffer->textures.individual.intermediate->width0;
   buffer->fb_state[0].height = buffer->textures.individual.intermediate->height0;

   buffer->fb_state[0].nr_cbufs = NR_RENDER_TARGETS;
   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      buffer->fb_state[0].cbufs[i] = idct->pipe->screen->get_tex_surface(
         idct->pipe->screen, buffer->textures.individual.intermediate, 0, 0, i,
         PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
   }

   buffer->fb_state[1].width = buffer->destination->width0;
   buffer->fb_state[1].height = buffer->destination->height0;

   buffer->fb_state[1].nr_cbufs = 1;
   buffer->fb_state[1].cbufs[0] = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, buffer->destination, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   for(i = 0; i < 2; ++i) {
      buffer->viewport[i].scale[2] = 1;
      buffer->viewport[i].scale[3] = 1;
      buffer->viewport[i].translate[0] = 0;
      buffer->viewport[i].translate[1] = 0;
      buffer->viewport[i].translate[2] = 0;
      buffer->viewport[i].translate[3] = 0;

      buffer->fb_state[i].zsbuf = NULL;
   }

   return true;
}

void
vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(buffer);

   for(i = 0; i < NR_RENDER_TARGETS; ++i) {
      idct->pipe->screen->tex_surface_destroy(buffer->fb_state[0].cbufs[i]);
   }

   idct->pipe->screen->tex_surface_destroy(buffer->fb_state[1].cbufs[0]);

   cleanup_textures(idct, buffer);
   cleanup_vertex_buffers(idct, buffer);
}

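/*
 * Map the source texture so that vl_idct_add_block() can copy coefficient
 * blocks into it.
 */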
void
vl_idct_map_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   struct pipe_box rect =
   {
      0, 0, 0,
      buffer->textures.individual.source->width0,
      buffer->textures.individual.source->height0,
      1
   };

   assert(idct);

   buffer->tex_transfer = idct->pipe->get_transfer
   (
      idct->pipe, buffer->textures.individual.source,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   buffer->texels = idct->pipe->transfer_map(idct->pipe, buffer->tex_transfer);
}

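/*
 * Copy one 8x8 block of coefficients into the mapped source texture at
 * block position (x, y) and queue a vertex for it.
 */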
void
vl_idct_add_block(struct vl_idct_buffer *buffer, unsigned x, unsigned y, short *block)
{
   struct vertex2f v;
   unsigned tex_pitch;
   short *texels;

   unsigned i;

   assert(buffer);

   tex_pitch = buffer->tex_transfer->stride / sizeof(short);
   texels = buffer->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;

   for (i = 0; i < BLOCK_HEIGHT; ++i)
      memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));

   v.x = x;
   v.y = y;
   vl_vb_add_block(&buffer->blocks, (float*)&v);
}

void
vl_idct_unmap_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   assert(idct && buffer);

   idct->pipe->transfer_unmap(idct->pipe, buffer->tex_transfer);
   idct->pipe->transfer_destroy(idct->pipe, buffer->tex_transfer);
}

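/*
 * Upload the vertices queued by vl_idct_add_block() and, if there is
 * anything to draw, run both passes: matrix_fs renders into the
 * intermediate framebuffer, transpose_fs renders into the destination.
 */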
void
vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   struct pipe_transfer *vec_transfer;
   void *vectors;
   unsigned num_verts;

   assert(idct);

   vectors = pipe_buffer_map
   (
      idct->pipe,
      buffer->vertex_bufs.individual.pos.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &vec_transfer
   );

   num_verts = vl_vb_upload(&buffer->blocks, vectors);

   pipe_buffer_unmap(idct->pipe, buffer->vertex_bufs.individual.pos.buffer, vec_transfer);

   if(num_verts > 0) {

      idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
      idct->pipe->set_vertex_buffers(idct->pipe, 2, buffer->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);

      /* first stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[0]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[0]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts);

      /* second stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[1]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts);
   }
}