vl_idct.c revision 7408a6ab89e0bc87209b50334604fae93277fdc6
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vl_idct.h"
#include "util/u_draw.h"
#include <assert.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <util/u_inlines.h>
#include <util/u_sampler.h>
#include <util/u_format.h>
#include <tgsi/tgsi_ureg.h>
#include "vl_types.h"

#define BLOCK_WIDTH 8
#define BLOCK_HEIGHT 8

#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)

#define STAGE1_SCALE 4.0f
#define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE)
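
/*
 * Note on scaling (assumption: the coefficients sit in a signed, normalized
 * 16 bit texture, as the shorts copied in vl_idct_add_block suggest):
 * sampling such a texture divides the values by 32768, so
 * SCALE_FACTOR_16_TO_9 = 32768 / 256 = 128 brings the result back into a
 * 9 bit signed range. The factor is split across the two render passes as
 * STAGE1_SCALE * STAGE2_SCALE = 4 * 32 = 128 to keep intermediate values
 * small.
 */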

struct vertex_shader_consts
{
   struct vertex4f norm;
};

enum VS_INPUT
{
   VS_I_RECT,
   VS_I_VPOS,

   NUM_VS_INPUTS
};

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_BLOCK,
   VS_O_TEX,
   VS_O_START
};

static const float const_matrix[8][8] = {
   {  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.353553f,  0.3535530f },
   {  0.4903930f,  0.4157350f,  0.2777850f,  0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   {  0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.191342f,  0.4619400f },
   {  0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.097545f, -0.4157350f },
   {  0.3535530f, -0.3535530f, -0.3535530f,  0.3535540f,  0.3535530f, -0.3535540f, -0.353553f,  0.3535530f },
   {  0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975451f,  0.490393f, -0.2777850f },
   {  0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913410f,  0.4619400f, -0.461940f,  0.1913420f },
   {  0.0975451f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.277786f, -0.0975458f }
};
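
/*
 * These are the coefficients of the orthonormal 8 point DCT-II basis:
 * const_matrix[i][j] = c(i) * cos((2*j + 1) * i * PI / 16), with
 * c(0) = 1/sqrt(8) ~= 0.353553 and c(i) = 0.5 for i > 0. A hypothetical
 * helper that would generate the same table at runtime (not used here,
 * shown only to document where the constants come from):
 *
 *    static void gen_dct_matrix(float m[8][8])
 *    {
 *       unsigned i, j;
 *       for (i = 0; i < 8; ++i)
 *          for (j = 0; j < 8; ++j)
 *             m[i][j] = (i == 0 ? 1.0f / sqrtf(8.0f) : 0.5f) *
 *                       cosf((2 * j + 1) * i * (float)M_PI / 16.0f);
 *    }
 */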

/* vertices for a quad covering a block */
static const struct vertex2f const_quad[4] = {
   {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f}
};
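
/*
 * The vertex shader adds these corner offsets (VS_I_RECT) to the per block
 * position stream (VS_I_VPOS), so the same four corners are reused for
 * every block; init_constants replicates them once per possible block.
 */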

static void *
create_vert_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src scale;
   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos, o_block, o_tex, o_start;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   scale = ureg_imm2f(shader,
      (float)BLOCK_WIDTH / idct->destination->width0,
      (float)BLOCK_HEIGHT / idct->destination->height0);

   t_vpos = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
   o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
   o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);

   /*
    * t_vpos = (vpos + vrect) * scale
    * o_vpos.xy = t_vpos
    * o_vpos.zw = vpos
    *
    * o_block = vrect
    * o_tex = t_vpos
    * o_start = vpos * scale
    */
   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
   ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, scale);

   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

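/*
 * Emits one pass of the IDCT as a matrix multiplication: the output value is
 * the dot product of an eight element vector from each of the two sampled
 * inputs. Each vector is assembled as two vec4 halves (m[0..1][side]); when
 * fetch4[side] is set a whole half comes from a single RGBA texel, otherwise
 * it is gathered with four scalar fetches advancing by step[side].
 */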
static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst,
           struct ureg_src tc[2], struct ureg_src sampler[2],
           struct ureg_src start[2], struct ureg_src step[2],
           bool fetch4[2], float scale)
{
   struct ureg_dst t_tc[2], m[2][2], tmp[2];
   unsigned side, i, j;

   for(i = 0; i < 2; ++i) {
      t_tc[i] = ureg_DECL_temporary(shader);
      for(j = 0; j < 2; ++j)
         m[i][j] = ureg_DECL_temporary(shader);
      tmp[i] = ureg_DECL_temporary(shader);
   }

   /*
    * m[0..1][side] = four coefficients fetched from input 'side'
    * tmp[0..1] = dot4(m[0..1][0], m[0..1][1])
    * fragment = (tmp[0] + tmp[1]) * scale
    */
   ureg_MOV(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_X), ureg_scalar(start[0], TGSI_SWIZZLE_X));
   ureg_MOV(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_Y), ureg_scalar(tc[0], TGSI_SWIZZLE_Y));

   if(fetch4[1]) {
      ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_X), ureg_scalar(start[1], TGSI_SWIZZLE_Y));
      ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_Y), ureg_scalar(tc[1], TGSI_SWIZZLE_X));
   } else {
      ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_X), ureg_scalar(tc[1], TGSI_SWIZZLE_X));
      ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_Y), ureg_scalar(start[1], TGSI_SWIZZLE_Y));
   }

   for(side = 0; side < 2; ++side) {
      for(i = 0; i < 2; ++i) {
         if(fetch4[side]) {
            ureg_TEX(shader, m[i][side], TGSI_TEXTURE_2D, ureg_src(t_tc[side]), sampler[side]);
            ureg_MOV(shader, ureg_writemask(t_tc[side], TGSI_WRITEMASK_X), step[side]);

         } else for(j = 0; j < 4; ++j) {
            /* Nouveau and r600g can't writemask tex dst regs (yet?), do in two steps */
            ureg_TEX(shader, tmp[side], TGSI_TEXTURE_2D, ureg_src(t_tc[side]), sampler[side]);
            ureg_MOV(shader, ureg_writemask(m[i][side], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp[side]), TGSI_SWIZZLE_X));

            ureg_ADD(shader, ureg_writemask(t_tc[side], TGSI_WRITEMASK_X << side), ureg_src(t_tc[side]), step[side]);
         }
      }
   }

   ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(m[0][0]), ureg_src(m[0][1]));
   ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(m[1][0]), ureg_src(m[1][1]));
   ureg_ADD(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(tmp[0]), ureg_src(tmp[1]));
   ureg_MUL(shader, dst, ureg_src(tmp[0]), ureg_imm1f(shader, scale));

   for(i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, t_tc[i]);
      for(j = 0; j < 2; ++j)
         ureg_release_temporary(shader, m[i][j]);
      ureg_release_temporary(shader, tmp[i]);
   }
}

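/*
 * First pass fragment shader: multiplies the packed (transposed) matrix
 * texture, bound as the fetch4 input, with the source coefficients and
 * scales by STAGE1_SCALE; vl_idct_flush renders this pass into the
 * intermediate texture.
 */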
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src tc[2], sampler[2];
   struct ureg_src start[2], step[2];
   struct ureg_dst fragment;
   bool fetch4[2];

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);

   start[0] = ureg_imm1f(shader, 0.0f);
   start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);

   step[0] = ureg_imm1f(shader, 4.0f / BLOCK_HEIGHT);
   step[1] = ureg_imm1f(shader, 1.0f / idct->destination->height0);

   sampler[0] = ureg_DECL_sampler(shader, 0);
   sampler[1] = ureg_DECL_sampler(shader, 1);

   fetch4[0] = true;
   fetch4[1] = false;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   matrix_mul(shader, fragment, tc, sampler, start, step, fetch4, STAGE1_SCALE);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

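/*
 * Second pass fragment shader: multiplies the intermediate result with the
 * matrix texture (again the fetch4 input) and scales by STAGE2_SCALE before
 * the result is written into the destination.
 */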
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src tc[2], sampler[2];
   struct ureg_src start[2], step[2];
   struct ureg_dst fragment;
   bool fetch4[2];

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);

   start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
   start[1] = ureg_imm1f(shader, 0.0f);

   step[0] = ureg_imm1f(shader, 1.0f / idct->destination->width0);
   step[1] = ureg_imm1f(shader, 4.0f / BLOCK_WIDTH);

   sampler[0] = ureg_DECL_sampler(shader, 1);
   sampler[1] = ureg_DECL_sampler(shader, 0);

   fetch4[0] = false;
   fetch4[1] = true;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   matrix_mul(shader, fragment, tc, sampler, start, step, fetch4, STAGE2_SCALE);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

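/*
 * Trivial fragment shader used to clear the destination for blocks that were
 * added without coefficients.
 */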
static void *
create_empty_block_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   ureg_MOV(shader, fragment, ureg_imm1f(shader, 0.0f));

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

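/*
 * Map the source coefficient texture and the block position vertex buffer so
 * vl_idct_add_block can fill them between flushes.
 */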
static void
xfer_buffers_map(struct vl_idct *idct)
{
   struct pipe_box rect =
   {
      0, 0, 0,
      idct->destination->width0,
      idct->destination->height0,
      1
   };

   idct->tex_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.source,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   idct->texels = idct->pipe->transfer_map(idct->pipe, idct->tex_transfer);

   idct->vectors = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.pos.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &idct->vec_transfer
   );
}

static void
xfer_buffers_unmap(struct vl_idct *idct)
{
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.pos.buffer, idct->vec_transfer);

   idct->pipe->transfer_unmap(idct->pipe, idct->tex_transfer);
   idct->pipe->transfer_destroy(idct->pipe, idct->tex_transfer);
}

static bool
init_shaders(struct vl_idct *idct)
{
   idct->vs = create_vert_shader(idct);
   idct->transpose_fs = create_transpose_frag_shader(idct);
   idct->matrix_fs = create_matrix_frag_shader(idct);
   idct->eb_fs = create_empty_block_frag_shader(idct);

   return
      idct->vs != NULL &&
      idct->transpose_fs != NULL &&
      idct->matrix_fs != NULL &&
      idct->eb_fs != NULL;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
   idct->pipe->delete_fs_state(idct->pipe, idct->eb_fs);
}

static bool
init_buffers(struct vl_idct *idct)
{
   struct pipe_resource template;
   struct pipe_sampler_view sampler_view;
   struct pipe_vertex_element vertex_elems[2];
   unsigned i;

   idct->max_blocks =
      align(idct->destination->width0, BLOCK_WIDTH) / BLOCK_WIDTH *
      align(idct->destination->height0, BLOCK_HEIGHT) / BLOCK_HEIGHT *
      idct->destination->depth0;
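
   /*
    * Example (illustrative numbers, not taken from this file): a 720x576
    * destination yields (720 / 8) * (576 / 8) * 1 = 90 * 72 = 6480 blocks
    * worth of quad and position vertices.
    */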

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   template.last_level = 0;
   template.width0 = 2;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   template.format = idct->destination->format;
   template.width0 = idct->destination->width0;
   template.height0 = idct->destination->height0;
   template.depth0 = idct->destination->depth0;
   template.usage = PIPE_USAGE_DYNAMIC;
   idct->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   template.usage = PIPE_USAGE_STATIC;
   idct->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   for (i = 0; i < 4; ++i) {
      if(idct->textures.all[i] == NULL)
         return false; /* a texture failed to allocate */

      u_sampler_view_default_template(&sampler_view, idct->textures.all[i], idct->textures.all[i]->format);
      idct->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, idct->textures.all[i], &sampler_view);
   }

   idct->vertex_bufs.individual.quad.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.quad.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.quad.buffer_offset = 0;
   idct->vertex_bufs.individual.quad.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.quad.buffer == NULL)
      return false;

   idct->vertex_bufs.individual.pos.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.pos.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.pos.buffer_offset = 0;
   idct->vertex_bufs.individual.pos.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.pos.buffer == NULL)
      return false;

   /* Rect element */
   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;

   /* Pos element */
   vertex_elems[1].src_offset = 0;
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 1;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;

   idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);

   return true;
}

static void
cleanup_buffers(struct vl_idct *idct)
{
   unsigned i;

   assert(idct);

   for (i = 0; i < 4; ++i) {
      pipe_sampler_view_reference(&idct->sampler_views.all[i], NULL);
      pipe_resource_reference(&idct->textures.all[i], NULL);
   }

   idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
   pipe_resource_reference(&idct->vertex_bufs.individual.quad.buffer, NULL);
   pipe_resource_reference(&idct->vertex_bufs.individual.pos.buffer, NULL);
}

static void
init_constants(struct vl_idct *idct)
{
   struct pipe_transfer *buf_transfer;
   struct vertex2f *v;

   unsigned i;

   /* quad vectors */
   v = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.quad.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &buf_transfer
   );
   for (i = 0; i < idct->max_blocks; ++i)
      memcpy(v + i * 4, &const_quad, sizeof(const_quad));
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.quad.buffer, buf_transfer);
}

static void
init_state(struct vl_idct *idct)
{
   struct pipe_sampler_state sampler;
   unsigned i;

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;

   idct->viewport.scale[0] = idct->destination->width0;
   idct->viewport.scale[1] = idct->destination->height0;
   idct->viewport.scale[2] = 1;
   idct->viewport.scale[3] = 1;
   idct->viewport.translate[0] = 0;
   idct->viewport.translate[1] = 0;
   idct->viewport.translate[2] = 0;
   idct->viewport.translate[3] = 0;

   idct->fb_state.width = idct->destination->width0;
   idct->fb_state.height = idct->destination->height0;
   idct->fb_state.nr_cbufs = 1;
   idct->fb_state.zsbuf = NULL;

   for (i = 0; i < 4; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      /*sampler.shadow_ambient = ; */
      /*sampler.lod_bias = ; */
      sampler.min_lod = 0;
      /*sampler.max_lod = ; */
      /*sampler.border_color[0] = ; */
      /*sampler.max_anisotropy = ; */
      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
   }
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for (i = 0; i < 4; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
}

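/*
 * Uploads the DCT matrix into a 2x8 RGBA32F texture: each of the eight
 * texture rows holds one column of const_matrix (i.e. the matrix transposed),
 * packed as two RGBA texels of four floats each, which is the layout the
 * fetch4 path in matrix_mul expects.
 */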
struct pipe_resource *
vl_idct_upload_matrix(struct pipe_context *pipe)
{
   struct pipe_resource template, *matrix;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH,
      BLOCK_HEIGHT,
      1
   };

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   template.last_level = 0;
   template.width0 = 2;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &template);

   /* matrix */
   buf_transfer = pipe->get_transfer
   (
      pipe, matrix,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / util_format_get_blocksize(buf_transfer->resource->format);

   f = pipe->transfer_map(pipe, buf_transfer);
   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch * 4 + j] = const_matrix[j][i]; // transpose

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   return matrix;
}

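/*
 * Initialize an IDCT context that renders into 'dst'; 'matrix' is a texture
 * previously created with vl_idct_upload_matrix and is referenced here for
 * both passes.
 */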
bool
vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe, struct pipe_resource *dst, struct pipe_resource *matrix)
{
   assert(idct && pipe && dst);

   idct->pipe = pipe;
   pipe_resource_reference(&idct->textures.individual.matrix, matrix);
   pipe_resource_reference(&idct->textures.individual.transpose, matrix);
   pipe_resource_reference(&idct->destination, dst);

   init_state(idct);

   if(!init_shaders(idct))
      return false;

   if(!init_buffers(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   idct->surfaces.intermediate = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   idct->surfaces.destination = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->destination, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   init_constants(idct);
   xfer_buffers_map(idct);

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   idct->pipe->screen->tex_surface_destroy(idct->surfaces.destination);
   idct->pipe->screen->tex_surface_destroy(idct->surfaces.intermediate);

   cleanup_shaders(idct);
   cleanup_buffers(idct);

   cleanup_state(idct);

   pipe_resource_reference(&idct->destination, NULL);
}

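/*
 * Queue one 8x8 block at block position (x, y): the coefficients are copied
 * into the mapped source texture and four identical position vertices are
 * written, from the front of the buffer for non-empty blocks and from the
 * back for empty ones (see vl_idct_flush).
 */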
void
vl_idct_add_block(struct vl_idct *idct, unsigned x, unsigned y, short *block)
{
   struct vertex2f v, *v_dst;

   unsigned tex_pitch;
   short *texels;

   unsigned i;

   assert(idct);

   if(block) {
      tex_pitch = idct->tex_transfer->stride / util_format_get_blocksize(idct->tex_transfer->resource->format);
      texels = idct->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;

      for (i = 0; i < BLOCK_HEIGHT; ++i)
         memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * 2);

      /* non-empty blocks fill the vector buffer from left to right */
      v_dst = idct->vectors + idct->num_blocks * 4;

      idct->num_blocks++;

   } else {

      /* while empty blocks fill the vector buffer from right to left */
      v_dst = idct->vectors + (idct->max_blocks - idct->num_empty_blocks) * 4 - 4;

      idct->num_empty_blocks++;
   }

   v.x = x;
   v.y = y;

   for (i = 0; i < 4; ++i) {
      v_dst[i] = v;
   }
}

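/*
 * Flush all queued blocks: non-empty blocks are drawn in two passes
 * (transpose_fs into the intermediate texture, then matrix_fs into the
 * destination), while empty blocks, which vl_idct_add_block packed at the
 * tail of the vertex buffer, are cleared with eb_fs in a single draw
 * starting at vertex (max_blocks - num_empty_blocks) * 4.
 */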
void
vl_idct_flush(struct vl_idct *idct)
{
   xfer_buffers_unmap(idct);

   if(idct->num_blocks > 0) {

      /* first stage */
      idct->fb_state.cbufs[0] = idct->surfaces.intermediate;
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[0]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);

      /* second stage */
      idct->fb_state.cbufs[0] = idct->surfaces.destination;
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[1]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);
   }

   if(idct->num_empty_blocks > 0) {

      /* empty block handling */
      idct->fb_state.cbufs[0] = idct->surfaces.destination;
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->eb_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS,
         (idct->max_blocks - idct->num_empty_blocks) * 4,
         idct->num_empty_blocks * 4);
   }

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;
   xfer_buffers_map(idct);
}