/* vl_idct.c (revision ccc80d2c09ad35f867c0c0a85f7e1cadd73941bb) */
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include <pipe/p_context.h>
#include <pipe/p_screen.h>

#include <util/u_draw.h>
#include <util/u_sampler.h>

#include <tgsi/tgsi_ureg.h>

#include "vl_defines.h"
#include "vl_types.h"
#include "vl_vertex_buffers.h"
#include "vl_idct.h"

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_L_ADDR0,
   VS_O_L_ADDR1,
   VS_O_R_ADDR0,
   VS_O_R_ADDR1
};

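/*
 * 8x8 DCT-II basis, roughly const_matrix[k][n] = c(k) * cos((2*n+1)*k*PI/16)
 * with c(0) = 1/sqrt(8) ~= 0.353553 and c(k) = 0.5 otherwise; the entries
 * below are ~6 digit approximations of that. vl_idct_upload_matrix()
 * transposes and scales this table when packing it into the matrix texture.
 * For reference only, an equivalent table could be generated at runtime like
 * this (cosf() from <math.h>, not used by this file):
 *
 *    float m[8][8];
 *    unsigned k, n;
 *    for (k = 0; k < 8; ++k)
 *       for (n = 0; n < 8; ++n)
 *          m[k][n] = (k == 0 ? 0.353553f : 0.5f) * cosf((2*n + 1) * k * M_PI / 16.0);
 */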
static const float const_matrix[8][8] = {
   {  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.353553f,  0.3535530f },
   {  0.4903930f,  0.4157350f,  0.2777850f,  0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   {  0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.191342f,  0.4619400f },
   {  0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.097545f, -0.4157350f },
   {  0.3535530f, -0.3535530f, -0.3535530f,  0.3535540f,  0.3535530f, -0.3535540f, -0.353553f,  0.3535530f },
   {  0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975451f,  0.490393f, -0.2777850f },
   {  0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913410f,  0.4619400f, -0.461940f,  0.1913420f },
   {  0.0975451f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.277786f, -0.0975458f }
};

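/*
 * Emit a pair of 3D texture addresses one texel (1.0f / size) apart along the
 * "start" axis; together with fetch_four() this covers the eight values (two
 * RGBA texels) of one line of a block. right_side and transposed select which
 * of the x/y components carries the block start and which the interpolated
 * texture coordinate.
 */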
static void
calc_addr(struct ureg_program *shader, struct ureg_dst addr[2],
          struct ureg_src tc, struct ureg_src start, bool right_side,
          bool transposed, float size)
{
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned sw_start = right_side ? TGSI_SWIZZLE_Y : TGSI_SWIZZLE_X;

   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
   unsigned sw_tc = right_side ? TGSI_SWIZZLE_X : TGSI_SWIZZLE_Y;

   /*
    * addr[0..1].(start) = right_side ? start.y : start.x
    * addr[0..1].(tc) = right_side ? tc.x : tc.y
    * addr[0..1].z = tc.z
    * addr[1].(start) += 1.0f / size
    */
   ureg_MOV(shader, ureg_writemask(addr[0], wm_start), ureg_scalar(start, sw_start));
   ureg_MOV(shader, ureg_writemask(addr[0], wm_tc), ureg_scalar(tc, sw_tc));
   ureg_MOV(shader, ureg_writemask(addr[0], TGSI_WRITEMASK_Z), tc);

   ureg_ADD(shader, ureg_writemask(addr[1], wm_start), ureg_scalar(start, sw_start), ureg_imm1f(shader, 1.0f / size));
   ureg_MOV(shader, ureg_writemask(addr[1], wm_tc), ureg_scalar(tc, sw_tc));
   ureg_MOV(shader, ureg_writemask(addr[1], TGSI_WRITEMASK_Z), tc);
}

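/*
 * Vertex shader used by both render passes. Besides the position it emits two
 * texture address pairs per vertex: in the matrix stage the left pair walks
 * the full source surface while the right pair stays within a single 8x8
 * block (the uploaded transform), in the transpose stage the roles are
 * swapped. Completely empty blocks (eb) are collapsed to a degenerate quad so
 * that nothing is rasterized for them.
 */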
static void *
create_vert_shader(struct vl_idct *idct, bool matrix_stage)
{
   struct ureg_program *shader;
   struct ureg_src vrect, vpos, vblock, eb;
   struct ureg_src scale, blocks_xy;
   struct ureg_dst t_tex, t_start;
   struct ureg_dst o_vpos, o_l_addr[2], o_r_addr[2];
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   t_tex = ureg_DECL_temporary(shader);
   t_start = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
   vblock = ureg_swizzle(vrect, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W, TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   eb = ureg_DECL_vs_input(shader, VS_I_EB);

   o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0);
   o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);

   o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0);
   o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    * blocks_xy = (blocks_x, blocks_y)
    *
    * if eb.(vblock.y, vblock.x)
    *    o_vpos.xy = -1
    * else
    *    t_tex = vpos * blocks_xy + vblock
    *    t_start = t_tex * scale
    *    t_tex = t_tex + vrect
    *    o_vpos.xy = t_tex * scale
    *
    *    o_l_addr = calc_addr(...)
    *    o_r_addr = calc_addr(...)
    * endif
    * o_vpos.zw = vpos
    *
    */

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->buffer_width,
      (float)BLOCK_HEIGHT / idct->buffer_height);

   blocks_xy = ureg_imm2f(shader, idct->blocks_x, idct->blocks_y);

   if (idct->blocks_x > 1 || idct->blocks_y > 1) {
      ureg_CMP(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY),
         ureg_negate(ureg_scalar(vblock, TGSI_SWIZZLE_Y)),
         ureg_swizzle(eb, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W),
         ureg_swizzle(eb, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y));

      ureg_CMP(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_X),
         ureg_negate(ureg_scalar(vblock, TGSI_SWIZZLE_X)),
         ureg_scalar(ureg_src(t_tex), TGSI_SWIZZLE_Y),
         ureg_scalar(ureg_src(t_tex), TGSI_SWIZZLE_X));

      eb = ureg_src(t_tex);
   }

   ureg_IF(shader, ureg_scalar(eb, TGSI_SWIZZLE_X), &label);

      ureg_MOV(shader, o_vpos, ureg_imm1f(shader, -1.0f));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ELSE(shader, &label);

      ureg_MAD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, blocks_xy, vblock);
      ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);

      ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), vrect);

      ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);
      ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_Z),
         ureg_scalar(vrect, TGSI_SWIZZLE_X),
         ureg_imm1f(shader, BLOCK_WIDTH / idct->nr_of_render_targets));

      ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_tex));

      if(matrix_stage) {
         calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
         calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);
      } else {
         calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
         calc_addr(shader, o_r_addr, ureg_src(t_tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
      }

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   ureg_release_temporary(shader, t_tex);
   ureg_release_temporary(shader, t_start);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

static void
increment_addr(struct ureg_program *shader, struct ureg_dst daddr[2],
               struct ureg_src saddr[2], bool right_side, bool transposed,
               int pos, float size)
{
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;

   /*
    * daddr[0..1].(start) = saddr[0..1].(start)
    * daddr[0..1].(tc) = saddr[0..1].(tc) + pos / size
    */

   ureg_MOV(shader, ureg_writemask(daddr[0], wm_start), saddr[0]);
   ureg_ADD(shader, ureg_writemask(daddr[0], wm_tc), saddr[0], ureg_imm1f(shader, pos / size));
   ureg_MOV(shader, ureg_writemask(daddr[1], wm_start), saddr[1]);
   ureg_ADD(shader, ureg_writemask(daddr[1], wm_tc), saddr[1], ureg_imm1f(shader, pos / size));
}

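/* Fetch both halves of a line: two RGBA texels, eight values in total. */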
static void
fetch_four(struct ureg_program *shader, struct ureg_dst m[2], struct ureg_src addr[2], struct ureg_src sampler)
{
   ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, addr[0], sampler);
   ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, addr[1], sampler);
}

static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
{
   struct ureg_dst tmp;

   tmp = ureg_DECL_temporary(shader);

   /*
    * tmp.x = dot4(l[0], r[0])
    * tmp.y = dot4(l[1], r[1])
    * dst = tmp.x + tmp.y, i.e. an 8 element dot product
    */
   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
   ureg_ADD(shader, dst,
      ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
      ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));

   ureg_release_temporary(shader, tmp);
}

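/*
 * Fragment shader for the first pass. Each fragment fetches four consecutive
 * rows of source coefficients (l) plus one line of the transform matrix (r),
 * shifted per render target, and emits four 8 element dot products, one in
 * each color channel of every bound render target.
 */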
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src l_addr[2], r_addr[2];

   struct ureg_dst l[4][2], r[2];
   struct ureg_dst fragment[idct->nr_of_render_targets];

   unsigned i, j;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);

   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);

   for (i = 0; i < idct->nr_of_render_targets; ++i)
      fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);

   for (i = 0; i < 4; ++i) {
      l[i][0] = ureg_DECL_temporary(shader);
      l[i][1] = ureg_DECL_temporary(shader);
   }

   r[0] = ureg_DECL_temporary(shader);
   r[1] = ureg_DECL_temporary(shader);

   for (i = 1; i < 4; ++i) {
      increment_addr(shader, l[i], l_addr, false, false, i, idct->buffer_height);
   }

   for (i = 0; i < 4; ++i) {
      struct ureg_src s_addr[2];
      s_addr[0] = i == 0 ? l_addr[0] : ureg_src(l[i][0]);
      s_addr[1] = i == 0 ? l_addr[1] : ureg_src(l[i][1]);
      fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 1));
   }

   for (i = 0; i < idct->nr_of_render_targets; ++i) {
      struct ureg_src s_addr[2];

      if(i > 0)
         increment_addr(shader, r, r_addr, true, true, i, BLOCK_HEIGHT);

      s_addr[0] = i == 0 ? r_addr[0] : ureg_src(r[0]);
      s_addr[1] = i == 0 ? r_addr[1] : ureg_src(r[1]);
      fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 0));

      for (j = 0; j < 4; ++j) {
         matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
      }
   }

   for (i = 0; i < 4; ++i) {
      ureg_release_temporary(shader, l[i][0]);
      ureg_release_temporary(shader, l[i][1]);
   }
   ureg_release_temporary(shader, r[0]);
   ureg_release_temporary(shader, r[1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

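/*
 * Fragment shader for the second pass: one 8 element dot product between the
 * transform and the intermediate result, writing a single scalar per fragment
 * to the destination surface.
 */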
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src l_addr[2], r_addr[2];

   struct ureg_dst l[2], r[2];
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);

   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);

   l[0] = ureg_DECL_temporary(shader);
   l[1] = ureg_DECL_temporary(shader);
   r[0] = ureg_DECL_temporary(shader);
   r[1] = ureg_DECL_temporary(shader);

   fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 0));
   fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 1));

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   matrix_mul(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X), l, r);

   ureg_release_temporary(shader, l[0]);
   ureg_release_temporary(shader, l[1]);
   ureg_release_temporary(shader, r[0]);
   ureg_release_temporary(shader, r[1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

static bool
init_shaders(struct vl_idct *idct)
{
   idct->matrix_vs = create_vert_shader(idct, true);
   if (!idct->matrix_vs)
      goto error_matrix_vs;

   idct->matrix_fs = create_matrix_frag_shader(idct);
   if (!idct->matrix_fs)
      goto error_matrix_fs;

   idct->transpose_vs = create_vert_shader(idct, false);
   if (!idct->transpose_vs)
      goto error_transpose_vs;

   idct->transpose_fs = create_transpose_frag_shader(idct);
   if (!idct->transpose_fs)
      goto error_transpose_fs;

   return true;

error_transpose_fs:
   idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);

error_transpose_vs:
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);

error_matrix_fs:
   idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);

error_matrix_vs:
   return false;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
   idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
}

static bool
init_state(struct vl_idct *idct)
{
   struct pipe_sampler_state sampler;
   struct pipe_rasterizer_state rs_state;
   unsigned i;

   assert(idct);

   memset(&rs_state, 0, sizeof(rs_state));
   rs_state.gl_rasterization_rules = false;
   idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);
   if (!idct->rs_state)
      goto error_rs_state;

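   /*
    * Nearest filtering on both units: the interpolated addresses are meant to
    * land on texel centers, linear filtering would blend neighbouring
    * coefficients.
    */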
   for (i = 0; i < 2; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      idct->samplers[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
      if (!idct->samplers[i])
         goto error_samplers;
   }

   return true;

error_samplers:
   for (i = 0; i < 2; ++i)
      if (idct->samplers[i])
         idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);

   idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);

error_rs_state:
   return false;
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for (i = 0; i < 2; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);

   idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
}

static bool
init_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   struct pipe_resource *tex;
   struct pipe_surface surf_templ;
   unsigned i;

   assert(idct && buffer);

   tex = buffer->sampler_views.individual.intermediate->texture;

   buffer->fb_state[0].width = tex->width0;
   buffer->fb_state[0].height = tex->height0;
   buffer->fb_state[0].nr_cbufs = idct->nr_of_render_targets;
   for(i = 0; i < idct->nr_of_render_targets; ++i) {
      memset(&surf_templ, 0, sizeof(surf_templ));
      surf_templ.format = tex->format;
      surf_templ.u.tex.first_layer = i;
      surf_templ.u.tex.last_layer = i;
      surf_templ.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
      buffer->fb_state[0].cbufs[i] = idct->pipe->create_surface(
         idct->pipe, tex, &surf_templ);

      if (!buffer->fb_state[0].cbufs[i])
         goto error_surfaces;
   }

   buffer->viewport[0].scale[0] = tex->width0;
   buffer->viewport[0].scale[1] = tex->height0;

   return true;

error_surfaces:
   for(i = 0; i < idct->nr_of_render_targets; ++i)
      pipe_surface_reference(&buffer->fb_state[0].cbufs[i], NULL);

   return false;
}

static void
cleanup_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(idct && buffer);

   for(i = 0; i < idct->nr_of_render_targets; ++i)
      pipe_surface_reference(&buffer->fb_state[0].cbufs[i], NULL);

   pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, NULL);
}

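/*
 * Upload the transposed and pre-scaled DCT matrix as a 2x8 RGBA32F texture:
 * each texel packs four consecutive coefficients, so a whole 8 value line can
 * be read with the two fetches done by fetch_four().
 */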
struct pipe_sampler_view *
vl_idct_upload_matrix(struct pipe_context *pipe, float scale)
{
   struct pipe_resource tex_templ, *matrix;
   struct pipe_sampler_view sv_templ, *sv;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH / 4,
      BLOCK_HEIGHT,
      1
   };

   assert(pipe);

   memset(&tex_templ, 0, sizeof(tex_templ));
   tex_templ.target = PIPE_TEXTURE_2D;
   tex_templ.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   tex_templ.last_level = 0;
   tex_templ.width0 = 2;
   tex_templ.height0 = 8;
   tex_templ.depth0 = 1;
   tex_templ.array_size = 1;
   tex_templ.usage = PIPE_USAGE_IMMUTABLE;
   tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
   tex_templ.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &tex_templ);
   if (!matrix)
      goto error_matrix;

   buf_transfer = pipe->get_transfer
   (
      pipe, matrix,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   if (!buf_transfer)
      goto error_transfer;

   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   if (!f)
      goto error_map;

   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         // transpose and scale
         f[i * pitch + j] = const_matrix[j][i] * scale;

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   memset(&sv_templ, 0, sizeof(sv_templ));
   u_sampler_view_default_template(&sv_templ, matrix, matrix->format);
   sv = pipe->create_sampler_view(pipe, matrix, &sv_templ);
   pipe_resource_reference(&matrix, NULL);
   if (!sv)
      goto error_matrix; /* transfer and matrix are already released at this point */

   return sv;

error_map:
   pipe->transfer_destroy(pipe, buf_transfer);

error_transfer:
   pipe_resource_reference(&matrix, NULL);

error_matrix:
   return NULL;
}

bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
                  unsigned buffer_width, unsigned buffer_height,
                  unsigned blocks_x, unsigned blocks_y,
                  unsigned nr_of_render_targets,
                  struct pipe_sampler_view *matrix,
                  struct pipe_sampler_view *transpose)
{
   assert(idct && pipe && matrix);

   idct->pipe = pipe;
   idct->buffer_width = buffer_width;
   idct->buffer_height = buffer_height;
   idct->blocks_x = blocks_x;
   idct->blocks_y = blocks_y;
   idct->nr_of_render_targets = nr_of_render_targets;

   pipe_sampler_view_reference(&idct->matrix, matrix);
   pipe_sampler_view_reference(&idct->transpose, transpose);

   if(!init_shaders(idct))
      return false;

   if(!init_state(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   cleanup_shaders(idct);
   cleanup_state(idct);

   pipe_sampler_view_reference(&idct->matrix, NULL);
   /* also drop the transpose reference taken in vl_idct_init() */
   pipe_sampler_view_reference(&idct->transpose, NULL);
}

bool
vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer,
                    struct pipe_sampler_view *source,
                    struct pipe_sampler_view *intermediate,
                    struct pipe_surface *destination)
{
   unsigned i;

   assert(buffer);
   assert(idct);
   assert(source);
   assert(destination);

   pipe_sampler_view_reference(&buffer->sampler_views.individual.matrix, idct->matrix);
   pipe_sampler_view_reference(&buffer->sampler_views.individual.source, source);
   pipe_sampler_view_reference(&buffer->sampler_views.individual.transpose, idct->transpose);
   pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, intermediate);

   if (!init_intermediate(idct, buffer))
      return false;

   /* init state */
   buffer->fb_state[1].width = destination->texture->width0;
   buffer->fb_state[1].height = destination->texture->height0;
   buffer->fb_state[1].nr_cbufs = 1;
   pipe_surface_reference(&buffer->fb_state[1].cbufs[0], destination);

   buffer->viewport[1].scale[0] = destination->texture->width0;
   buffer->viewport[1].scale[1] = destination->texture->height0;

   for(i = 0; i < 2; ++i) {
      buffer->viewport[i].scale[2] = 1;
      buffer->viewport[i].scale[3] = 1;
      buffer->viewport[i].translate[0] = 0;
      buffer->viewport[i].translate[1] = 0;
      buffer->viewport[i].translate[2] = 0;
      buffer->viewport[i].translate[3] = 0;

      buffer->fb_state[i].zsbuf = NULL;
   }

   return true;
}

void
vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(idct && buffer);

   for(i = 0; i < idct->nr_of_render_targets; ++i)
      pipe_surface_reference(&buffer->fb_state[0].cbufs[i], NULL);

   pipe_surface_reference(&buffer->fb_state[1].cbufs[0], NULL);

   cleanup_intermediate(idct, buffer);
}

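/*
 * Run both render passes: the first multiplies the source coefficients with
 * the matrix into the layers of the intermediate texture, the second applies
 * the transpose and writes the final values to the destination surface. Each
 * block is drawn as one quad (4 vertices), instanced num_instances times.
 */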
void
vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer, unsigned num_instances)
{
   unsigned num_verts;

   assert(idct);
   assert(buffer);

   if(num_instances > 0) {
      num_verts = idct->blocks_x * idct->blocks_y * 4;

      idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers);

      /* first stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[0]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[0]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
      util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts, 0, num_instances);

      /* second stage */
      idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[1]);
      idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[1]);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
      util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts, 0, num_instances);
   }
}