vl_idct.c revision 3db6514357a7c634045ae7bc7bba7d7dbf9d58c5
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include <pipe/p_context.h>
#include <pipe/p_screen.h>

#include <util/u_draw.h>
#include <util/u_sampler.h>

#include <tgsi/tgsi_ureg.h>

#include "vl_defines.h"
#include "vl_types.h"
#include "vl_vertex_buffers.h"
#include "vl_idct.h"

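/*
 * Rough overview, as far as this file goes: the IDCT is done as two matrix
 * multiplications with the 8x8 DCT basis matrix.  Stage 1 multiplies the
 * source coefficient blocks with the (transposed) matrix and writes the
 * intermediate result into a layered render target; stage 2, whose shader
 * code is emitted into the caller's own programs via vl_idct_stage2_*(),
 * multiplies the intermediate with the matrix again to produce the final
 * samples.  The *_ADDR* outputs below carry the texture addresses of the
 * left (data) and right (matrix) operands of those multiplications.
 */
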
enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_L_ADDR0,
   VS_O_L_ADDR1,
   VS_O_R_ADDR0,
   VS_O_R_ADDR1
};

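/*
 * These appear to be the rows of the orthonormal 8-point DCT-II basis:
 * const_matrix[i][j] = c(i) * cos((2*j + 1) * i * PI / 16) with
 * c(0) = 1/sqrt(8) and c(i) = 1/2 for i > 0.  For example
 * const_matrix[0][j] = 1/sqrt(8) ~= 0.353553 and
 * const_matrix[1][0] = 0.5 * cos(PI/16) ~= 0.490393.
 */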
static const float const_matrix[8][8] = {
   {  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.353553f,  0.3535530f },
   {  0.4903930f,  0.4157350f,  0.2777850f,  0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   {  0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.191342f,  0.4619400f },
   {  0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.097545f, -0.4157350f },
   {  0.3535530f, -0.3535530f, -0.3535530f,  0.3535540f,  0.3535530f, -0.3535540f, -0.353553f,  0.3535530f },
   {  0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975451f,  0.490393f, -0.2777850f },
   {  0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913410f,  0.4619400f, -0.461940f,  0.1913420f },
   {  0.0975451f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.277786f, -0.0975458f }
};

static void
calc_addr(struct ureg_program *shader, struct ureg_dst addr[2],
          struct ureg_src tc, struct ureg_src start, bool right_side,
          bool transposed, float size)
{
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned sw_start = right_side ? TGSI_SWIZZLE_Y : TGSI_SWIZZLE_X;

   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
   unsigned sw_tc = right_side ? TGSI_SWIZZLE_X : TGSI_SWIZZLE_Y;

   /*
    * addr[0..1].(start) = right_side ? start.y : start.x
    * addr[0..1].(tc) = right_side ? tc.x : tc.y
    * addr[0..1].z = tc.z
    * addr[1].(start) += 1.0f / size
    */
   ureg_MOV(shader, ureg_writemask(addr[0], wm_start), ureg_scalar(start, sw_start));
   ureg_MOV(shader, ureg_writemask(addr[0], wm_tc), ureg_scalar(tc, sw_tc));
   ureg_MOV(shader, ureg_writemask(addr[0], TGSI_WRITEMASK_Z), tc);

   ureg_ADD(shader, ureg_writemask(addr[1], wm_start), ureg_scalar(start, sw_start), ureg_imm1f(shader, 1.0f / size));
   ureg_MOV(shader, ureg_writemask(addr[1], wm_tc), ureg_scalar(tc, sw_tc));
   ureg_MOV(shader, ureg_writemask(addr[1], TGSI_WRITEMASK_Z), tc);
}

static void
increment_addr(struct ureg_program *shader, struct ureg_dst daddr[2],
               struct ureg_src saddr[2], bool right_side, bool transposed,
               int pos, float size)
{
   unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
   unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;

   /*
    * daddr[0..1].(start) = saddr[0..1].(start)
    * daddr[0..1].(tc) = saddr[0..1].(tc) + pos / size
    */

   ureg_MOV(shader, ureg_writemask(daddr[0], wm_start), saddr[0]);
   ureg_ADD(shader, ureg_writemask(daddr[0], wm_tc), saddr[0], ureg_imm1f(shader, pos / size));
   ureg_MOV(shader, ureg_writemask(daddr[1], wm_start), saddr[1]);
   ureg_ADD(shader, ureg_writemask(daddr[1], wm_tc), saddr[1], ureg_imm1f(shader, pos / size));
}

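/*
 * Fetch eight consecutive values as two texels: addr[0] and addr[1]
 * address the two halves of a row or column (calc_addr() offsets addr[1]
 * by 1/size along the start axis), so m[0] and m[1] together hold one
 * full 8-element operand of the matrix multiplication.
 */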
static void
fetch_four(struct ureg_program *shader, struct ureg_dst m[2], struct ureg_src addr[2], struct ureg_src sampler)
{
   ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, addr[0], sampler);
   ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, addr[1], sampler);
}

static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
{
   struct ureg_dst tmp;

   tmp = ureg_DECL_temporary(shader);

   /*
    * tmp.x = dot4(l[0], r[0])
    * tmp.y = dot4(l[1], r[1])
    * dst = tmp.x + tmp.y
    */
   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
   ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
   ureg_ADD(shader, dst,
      ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
      ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));

   ureg_release_temporary(shader, tmp);
}

static void *
create_stage1_vert_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src vrect, vpos;
   struct ureg_src scale;
   struct ureg_dst t_tex, t_start;
   struct ureg_dst o_vpos, o_l_addr[2], o_r_addr[2];

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_tex = ureg_DECL_temporary(shader);
   t_start = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0);
   o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);

   o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0);
   o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);

   /*
    * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_tex.xy = (vpos + vrect) * scale
    * o_vpos.xy = t_tex.xy
    * o_vpos.zw = 1.0f
    *
    * o_l_addr = calc_addr(...)
    * o_r_addr = calc_addr(...)
    */

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->buffer_width,
      (float)BLOCK_HEIGHT / idct->buffer_height);

   ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);

   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_tex));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));

   ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_Z),
      ureg_scalar(vrect, TGSI_SWIZZLE_X),
      ureg_imm1f(shader, BLOCK_WIDTH / idct->nr_of_render_targets));
   ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);

   calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
   calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);

   ureg_release_temporary(shader, t_tex);
   ureg_release_temporary(shader, t_start);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

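/*
 * Stage-1 fragment shader (informal reading of the code below): for every
 * output pixel it fetches four pairs of source texels (l[0..3], sampler 1)
 * around the current position and, for each render target, one pair of
 * matrix texels (r, sampler 0).  Component j of render target i then
 * receives the 8-element dot product of l[j] with r, i.e. one element of
 * the source x matrix product.
 */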
static void *
create_stage1_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;

   struct ureg_src l_addr[2], r_addr[2];

   struct ureg_dst l[4][2], r[2];
   struct ureg_dst fragment[idct->nr_of_render_targets];

   int i, j;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);

   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);

   for (i = 0; i < idct->nr_of_render_targets; ++i)
      fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);

   for (i = 0; i < 4; ++i) {
      l[i][0] = ureg_DECL_temporary(shader);
      l[i][1] = ureg_DECL_temporary(shader);
   }

   r[0] = ureg_DECL_temporary(shader);
   r[1] = ureg_DECL_temporary(shader);

   for (i = 0; i < 4; ++i) {
      increment_addr(shader, l[i], l_addr, false, false, i - 2, idct->buffer_height);
   }

   for (i = 0; i < 4; ++i) {
      struct ureg_src s_addr[2] = { ureg_src(l[i][0]), ureg_src(l[i][1]) };
      fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 1));
   }

   for (i = 0; i < idct->nr_of_render_targets; ++i) {
      increment_addr(shader, r, r_addr, true, true, i - (signed)idct->nr_of_render_targets / 2, BLOCK_HEIGHT);

      struct ureg_src s_addr[2] = { ureg_src(r[0]), ureg_src(r[1]) };
      fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 0));

      for (j = 0; j < 4; ++j) {
         matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
      }
   }

   for (i = 0; i < 4; ++i) {
      ureg_release_temporary(shader, l[i][0]);
      ureg_release_temporary(shader, l[i][1]);
   }
   ureg_release_temporary(shader, r[0]);
   ureg_release_temporary(shader, r[1]);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

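/*
 * The stage-2 shader code below is not compiled into programs of its own;
 * it is appended to the caller's vertex and fragment shaders, with
 * first_output/first_input giving the base semantic index to use.  The
 * stage-2 draw itself is issued by the caller after vl_idct_prepare_stage2()
 * has bound the samplers and sampler views.
 */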
void
vl_idct_stage2_vert_shader(struct vl_idct *idct, struct ureg_program *shader,
                           unsigned first_output, struct ureg_dst tex)
{
   struct ureg_src vrect, vpos;
   struct ureg_src scale;
   struct ureg_dst t_start;
   struct ureg_dst o_l_addr[2], o_r_addr[2];

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_start = ureg_DECL_temporary(shader);

   --first_output;

   o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output + VS_O_L_ADDR0);
   o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output + VS_O_L_ADDR1);

   o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output + VS_O_R_ADDR0);
   o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output + VS_O_R_ADDR1);

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->buffer_width,
      (float)BLOCK_HEIGHT / idct->buffer_height);

   ureg_MUL(shader, ureg_writemask(tex, TGSI_WRITEMASK_Z),
      ureg_scalar(vrect, TGSI_SWIZZLE_X),
      ureg_imm1f(shader, BLOCK_WIDTH / idct->nr_of_render_targets));
   ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);

   calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
   calc_addr(shader, o_r_addr, ureg_src(tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
}

void
vl_idct_stage2_frag_shader(struct vl_idct *idct, struct ureg_program *shader,
                           unsigned first_input, struct ureg_dst fragment)
{
   struct ureg_src l_addr[2], r_addr[2];

   struct ureg_dst l[2], r[2];

   --first_input;

   l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
   l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);

   r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
   r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input + VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);

   l[0] = ureg_DECL_temporary(shader);
   l[1] = ureg_DECL_temporary(shader);
   r[0] = ureg_DECL_temporary(shader);
   r[1] = ureg_DECL_temporary(shader);

   fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 0));
   fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 1));

   matrix_mul(shader, fragment, l, r);

   ureg_release_temporary(shader, l[0]);
   ureg_release_temporary(shader, l[1]);
   ureg_release_temporary(shader, r[0]);
   ureg_release_temporary(shader, r[1]);
}

static bool
init_shaders(struct vl_idct *idct)
{
   idct->vs = create_stage1_vert_shader(idct);
   if (!idct->vs)
      goto error_vs;

   idct->fs = create_stage1_frag_shader(idct);
   if (!idct->fs)
      goto error_fs;

   return true;

error_fs:
   idct->pipe->delete_vs_state(idct->pipe, idct->vs);

error_vs:
   return false;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->fs);
}

static bool
init_state(struct vl_idct *idct)
{
   struct pipe_blend_state blend;
   struct pipe_rasterizer_state rs_state;
   struct pipe_sampler_state sampler;
   unsigned i;

   assert(idct);

   memset(&rs_state, 0, sizeof(rs_state));
   rs_state.gl_rasterization_rules = true;
   idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);
   if (!idct->rs_state)
      goto error_rs_state;

   memset(&blend, 0, sizeof blend);

   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 0;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   /* Needed to allow color writes to FB, even if blending disabled */
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   idct->blend = idct->pipe->create_blend_state(idct->pipe, &blend);
   if (!idct->blend)
      goto error_blend;

   for (i = 0; i < 2; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      idct->samplers[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
      if (!idct->samplers[i])
         goto error_samplers;
   }

   return true;

   /* unwind in reverse order of creation */
error_samplers:
   for (i = 0; i < 2; ++i)
      if (idct->samplers[i])
         idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);

   idct->pipe->delete_blend_state(idct->pipe, idct->blend);

error_blend:
   idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);

error_rs_state:
   return false;
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for (i = 0; i < 2; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers[i]);

   idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
   idct->pipe->delete_blend_state(idct->pipe, idct->blend);
}

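/*
 * The intermediate texture is layered: each of the nr_of_render_targets
 * layers gets its own surface bound as a color buffer, so the stage-1
 * pass can write several partial results per draw.
 */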
static bool
init_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   struct pipe_resource *tex;
   struct pipe_surface surf_templ;
   unsigned i;

   assert(idct && buffer);

   tex = buffer->sampler_views.individual.intermediate->texture;

   buffer->fb_state.width = tex->width0;
   buffer->fb_state.height = tex->height0;
   buffer->fb_state.nr_cbufs = idct->nr_of_render_targets;
   for (i = 0; i < idct->nr_of_render_targets; ++i) {
      memset(&surf_templ, 0, sizeof(surf_templ));
      surf_templ.format = tex->format;
      surf_templ.u.tex.first_layer = i;
      surf_templ.u.tex.last_layer = i;
      surf_templ.usage = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
      buffer->fb_state.cbufs[i] = idct->pipe->create_surface(
         idct->pipe, tex, &surf_templ);

      if (!buffer->fb_state.cbufs[i])
         goto error_surfaces;
   }

   buffer->viewport.scale[0] = tex->width0;
   buffer->viewport.scale[1] = tex->height0;

   return true;

error_surfaces:
   for (i = 0; i < idct->nr_of_render_targets; ++i)
      pipe_surface_reference(&buffer->fb_state.cbufs[i], NULL);

   return false;
}

static void
cleanup_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(idct && buffer);

   for (i = 0; i < idct->nr_of_render_targets; ++i)
      pipe_surface_reference(&buffer->fb_state.cbufs[i], NULL);

   pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, NULL);
}

struct pipe_sampler_view *
vl_idct_upload_matrix(struct pipe_context *pipe, float scale)
{
   struct pipe_resource tex_templ, *matrix;
   struct pipe_sampler_view sv_templ, *sv;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH / 4,
      BLOCK_HEIGHT,
      1
   };

   assert(pipe);

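   /*
    * The 8x8 float matrix is packed into a 2x8 RGBA32F texture, so each
    * texel carries four adjacent row entries and fetch_four() can read a
    * whole row of eight values with two samples; rect above accordingly
    * covers BLOCK_WIDTH / 4 texels per row.
    */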
   memset(&tex_templ, 0, sizeof(tex_templ));
   tex_templ.target = PIPE_TEXTURE_2D;
   tex_templ.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   tex_templ.last_level = 0;
   tex_templ.width0 = 2;
   tex_templ.height0 = 8;
   tex_templ.depth0 = 1;
   tex_templ.array_size = 1;
   tex_templ.usage = PIPE_USAGE_IMMUTABLE;
   tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
   tex_templ.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &tex_templ);
   if (!matrix)
      goto error_matrix;

   buf_transfer = pipe->get_transfer
   (
      pipe, matrix,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   if (!buf_transfer)
      goto error_transfer;

   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   if (!f)
      goto error_map;

   for (i = 0; i < BLOCK_HEIGHT; ++i)
      for (j = 0; j < BLOCK_WIDTH; ++j)
         // transpose and scale
         f[i * pitch + j] = const_matrix[j][i] * scale;

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   memset(&sv_templ, 0, sizeof(sv_templ));
   u_sampler_view_default_template(&sv_templ, matrix, matrix->format);
   sv = pipe->create_sampler_view(pipe, matrix, &sv_templ);
   pipe_resource_reference(&matrix, NULL);
   if (!sv)
      return NULL; /* transfer and matrix were already released above */

   return sv;

error_map:
   pipe->transfer_destroy(pipe, buf_transfer);

error_transfer:
   pipe_resource_reference(&matrix, NULL);

error_matrix:
   return NULL;
}
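
/*
 * A caller creates the matrix view with vl_idct_upload_matrix() and passes
 * it, together with a transpose view, to vl_idct_init() below; vl_idct_init()
 * takes its own references, so the caller keeps (and later releases) its own.
 */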

bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
                  unsigned buffer_width, unsigned buffer_height,
                  unsigned nr_of_render_targets,
                  struct pipe_sampler_view *matrix,
                  struct pipe_sampler_view *transpose)
{
   assert(idct && pipe && matrix);

   idct->pipe = pipe;
   idct->buffer_width = buffer_width;
   idct->buffer_height = buffer_height;
   idct->nr_of_render_targets = nr_of_render_targets;

   pipe_sampler_view_reference(&idct->matrix, matrix);
   pipe_sampler_view_reference(&idct->transpose, transpose);

   if (!init_shaders(idct))
      return false;

   if (!init_state(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   cleanup_shaders(idct);
   cleanup_state(idct);

   pipe_sampler_view_reference(&idct->matrix, NULL);
   pipe_sampler_view_reference(&idct->transpose, NULL);
}

bool
vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer,
                    struct pipe_sampler_view *source,
                    struct pipe_sampler_view *intermediate,
                    struct pipe_surface *destination)
{
   assert(buffer);
   assert(idct);
   assert(source);
   assert(destination);

   memset(buffer, 0, sizeof(struct vl_idct_buffer));

   pipe_sampler_view_reference(&buffer->sampler_views.individual.matrix, idct->matrix);
   pipe_sampler_view_reference(&buffer->sampler_views.individual.source, source);
   pipe_sampler_view_reference(&buffer->sampler_views.individual.transpose, idct->transpose);
   pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, intermediate);

   if (!init_intermediate(idct, buffer))
      return false;

   buffer->viewport.scale[2] = 1;
   buffer->viewport.scale[3] = 1;
   buffer->viewport.translate[0] = 0;
   buffer->viewport.translate[1] = 0;
   buffer->viewport.translate[2] = 0;
   buffer->viewport.translate[3] = 0;

   return true;
}

void
vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   unsigned i;

   assert(idct && buffer);

   for (i = 0; i < idct->nr_of_render_targets; ++i)
      pipe_surface_reference(&buffer->fb_state.cbufs[i], NULL);

   pipe_sampler_view_reference(&buffer->sampler_views.individual.matrix, NULL);
   pipe_sampler_view_reference(&buffer->sampler_views.individual.source, NULL);
   pipe_sampler_view_reference(&buffer->sampler_views.individual.transpose, NULL);

   cleanup_intermediate(idct, buffer);
}

void
vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer, unsigned num_instances)
{
   assert(idct);
   assert(buffer);

   idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
   idct->pipe->bind_blend_state(idct->pipe, idct->blend);
   idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers);

   /* first stage */
   idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state);
   idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport);
   idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
   idct->pipe->bind_vs_state(idct->pipe, idct->vs);
   idct->pipe->bind_fs_state(idct->pipe, idct->fs);
   util_draw_arrays_instanced(idct->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
}

void
vl_idct_prepare_stage2(struct vl_idct *idct, struct vl_idct_buffer *buffer)
{
   assert(idct);
   assert(buffer);

   /* second stage */
   idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
   idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers);
   idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
}