vl_idct.c revision 2c9db2484b7c1cec7a3a629f70a5aa840e16268e
/**************************************************************************
 *
 * Copyright 2010 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vl_idct.h"
#include "util/u_draw.h"
#include <assert.h>
#include <pipe/p_context.h>
#include <pipe/p_screen.h>
#include <util/u_inlines.h>
#include <util/u_sampler.h>
#include <util/u_format.h>
#include <tgsi/tgsi_ureg.h>
#include "vl_types.h"

#define BLOCK_WIDTH 8
#define BLOCK_HEIGHT 8

#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)

#define STAGE1_SCALE 4.0f
#define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE)
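/*
 * The IDCT runs as two render passes, one 1D transform per pass.
 * SCALE_FACTOR_16_TO_9 is 32768.0 / 256.0 = 128.0; judging by its name it
 * rescales values between a 16 bit and a 9 bit signed range. STAGE1_SCALE
 * is applied by the first pass and divided out of the combined factor for
 * the second pass (STAGE1_SCALE * STAGE2_SCALE == SCALE_FACTOR_16_TO_9),
 * presumably to keep intermediate values within the precision of the
 * intermediate render target.
 */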

struct vertex_shader_consts
{
   struct vertex4f norm;
};

enum VS_INPUT
{
   VS_I_RECT,
   VS_I_VPOS,

   NUM_VS_INPUTS
};

enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_BLOCK,
   VS_O_TEX,
   VS_O_START,
   VS_O_STEP
};

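/*
 * The orthonormal 8x8 DCT-II basis: row 0 is 1 / (2 * sqrt(2)) and row u > 0
 * holds approximately 0.5 * cos((2 * x + 1) * u * PI / 16) for column x.
 * The two render passes multiply by this matrix and its transpose to form
 * the separable 2D inverse transform.
 */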
static const float const_matrix[8][8] = {
   {  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.3535530f,  0.353553f,  0.3535530f },
   {  0.4903930f,  0.4157350f,  0.2777850f,  0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
   {  0.4619400f,  0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f,  0.191342f,  0.4619400f },
   {  0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f,  0.2777850f,  0.4903930f,  0.097545f, -0.4157350f },
   {  0.3535530f, -0.3535530f, -0.3535530f,  0.3535540f,  0.3535530f, -0.3535540f, -0.353553f,  0.3535530f },
   {  0.2777850f, -0.4903930f,  0.0975452f,  0.4157350f, -0.4157350f, -0.0975451f,  0.490393f, -0.2777850f },
   {  0.1913420f, -0.4619400f,  0.4619400f, -0.1913420f, -0.1913410f,  0.4619400f, -0.461940f,  0.1913420f },
   {  0.0975451f, -0.2777850f,  0.4157350f, -0.4903930f,  0.4903930f, -0.4157350f,  0.277786f, -0.0975458f }
};

/* vertices for a quad covering a block */
static const struct vertex2f const_quad[4] = {
   {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f}
};

static void *
create_vert_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src norm, bs;
   struct ureg_src vrect, vpos;
   struct ureg_dst scale, t_vpos;
   struct ureg_dst o_vpos, o_block, o_tex, o_start, o_step;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   norm = ureg_DECL_constant(shader, 0);
   bs = ureg_imm2f(shader, BLOCK_WIDTH, BLOCK_HEIGHT);

   scale = ureg_DECL_temporary(shader);
   t_vpos = ureg_DECL_temporary(shader);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
   o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
   o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);
   o_step = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_STEP);

   /*
    * scale = norm * bs
    *
    * t_vpos = vpos + vrect
    * o_vpos.xy = t_vpos * scale
    * o_vpos.zw = vpos
    *
    * o_block = vrect
    * o_tex = t_vpos
    * o_start = vpos * scale
    * o_step = norm
    */
   ureg_MUL(shader, ureg_writemask(scale, TGSI_WRITEMASK_XY), norm, bs);

   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), ureg_src(scale));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);

   ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
   ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, ureg_src(scale));
   ureg_MOV(shader, ureg_writemask(o_step, TGSI_WRITEMASK_XY), norm);

   ureg_release_temporary(shader, t_vpos);
   ureg_release_temporary(shader, scale);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

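/*
 * Emit TGSI that computes one element of an 8x8 matrix product: eight
 * texels are fetched along a row of sampler[0] and eight down a column of
 * sampler[1] (four per temporary, hence the two DP4s below), the partial
 * dot products are summed and the result is multiplied by scale. tc, start
 * and step select the starting texture coordinates and the per-texel
 * increment for each of the two textures.
 */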
static void
matrix_mul(struct ureg_program *shader, struct ureg_dst dst,
           struct ureg_src tc[2], struct ureg_src sampler[2],
           struct ureg_src start[2], struct ureg_src step[2],
           float scale)
{
   struct ureg_dst t_tc[2], m[2][2], tmp[2];
   unsigned i, j;

   for(i = 0; i < 2; ++i) {
      t_tc[i] = ureg_DECL_temporary(shader);
      for(j = 0; j < 2; ++j)
         m[i][j] = ureg_DECL_temporary(shader);
      tmp[i] = ureg_DECL_temporary(shader);
   }

   /*
    * m[0..1][0..1] = texels gathered from sampler[0..1] along start/step
    * tmp[0..1] = dot4(m[0..1][0], m[0..1][1])
    * fragment = (tmp[0] + tmp[1]) * scale
    */
   ureg_MOV(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_X), start[0]);
   ureg_MOV(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_Y), tc[0]);

   ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_X), tc[1]);
   ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_Y), start[1]);

   for(i = 0; i < 2; ++i) {
      for(j = 0; j < 4; ++j) {
         /* Nouveau and r600g can't writemask tex dst regs (yet?), do in two steps */
         ureg_TEX(shader, tmp[0], TGSI_TEXTURE_2D, ureg_src(t_tc[0]), sampler[0]);
         ureg_MOV(shader, ureg_writemask(m[i][0], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp[0]), TGSI_SWIZZLE_X));

         ureg_TEX(shader, tmp[1], TGSI_TEXTURE_2D, ureg_src(t_tc[1]), sampler[1]);
         ureg_MOV(shader, ureg_writemask(m[i][1], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp[1]), TGSI_SWIZZLE_X));

         ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_X), ureg_src(t_tc[0]), step[0]);
         ureg_ADD(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_Y), ureg_src(t_tc[1]), step[1]);
      }
   }

   ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(m[0][0]), ureg_src(m[0][1]));
   ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(m[1][0]), ureg_src(m[1][1]));
   ureg_ADD(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(tmp[0]), ureg_src(tmp[1]));
   ureg_MUL(shader, dst, ureg_src(tmp[0]), ureg_imm1f(shader, scale));

   for(i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, t_tc[i]);
      for(j = 0; j < 2; ++j)
         ureg_release_temporary(shader, m[i][j]);
      ureg_release_temporary(shader, tmp[i]);
   }
}

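/*
 * Fragment shader for the first pass; vl_idct_flush() binds it while
 * rendering into the intermediate texture and it applies STAGE1_SCALE.
 * Which of the two bound samplers carries the source coefficients and
 * which the basis matrix depends on the stage[0] view ordering declared
 * in vl_idct.h, so that pairing is not spelled out here. Note that the
 * BLOCK/TEX coordinates and the sampler indices are swapped relative to
 * create_matrix_frag_shader() below.
 */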
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src tc[2], sampler[2];
   struct ureg_src start[2], step[2];
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);

   start[0] = ureg_imm1f(shader, 0.0f);
   start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);

   step[0] = ureg_imm1f(shader, 1.0f / BLOCK_HEIGHT);
   step[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_STEP, TGSI_INTERPOLATE_CONSTANT);

   sampler[0] = ureg_DECL_sampler(shader, 0);
   sampler[1] = ureg_DECL_sampler(shader, 1);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   matrix_mul(shader, fragment, tc, sampler, start, step, STAGE1_SCALE);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

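/*
 * Fragment shader for the second pass; vl_idct_flush() binds it while
 * rendering into the destination surface and it applies STAGE2_SCALE, so
 * the combined scaling of both passes equals SCALE_FACTOR_16_TO_9.
 */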
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_src tc[2], sampler[2];
   struct ureg_src start[2], step[2];
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);

   start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
   start[1] = ureg_imm1f(shader, 0.0f);

   step[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_STEP, TGSI_INTERPOLATE_CONSTANT);
   step[1] = ureg_imm1f(shader, 1.0f / BLOCK_WIDTH);

   sampler[0] = ureg_DECL_sampler(shader, 1);
   sampler[1] = ureg_DECL_sampler(shader, 0);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   matrix_mul(shader, fragment, tc, sampler, start, step, STAGE2_SCALE);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

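/*
 * Trivial fragment shader that writes 0.0 to every covered texel; used by
 * vl_idct_flush() for blocks that were queued without coefficient data
 * (block == NULL in vl_idct_add_block()).
 */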
static void *
create_empty_block_frag_shader(struct vl_idct *idct)
{
   struct ureg_program *shader;
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   ureg_MOV(shader, fragment, ureg_imm1f(shader, 0.0f));

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, idct->pipe);
}

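/*
 * Map the source coefficient texture and the position vertex buffer so
 * vl_idct_add_block() can write straight into them; the mappings stay
 * open until the next vl_idct_flush().
 */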
static void
xfer_buffers_map(struct vl_idct *idct)
{
   struct pipe_box rect =
   {
      0, 0, 0,
      idct->destination->width0,
      idct->destination->height0,
      1
   };

   idct->tex_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.source,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   idct->texels = idct->pipe->transfer_map(idct->pipe, idct->tex_transfer);

   idct->vectors = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.pos.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &idct->vec_transfer
   );
}

static void
xfer_buffers_unmap(struct vl_idct *idct)
{
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.pos.buffer, idct->vec_transfer);

   idct->pipe->transfer_unmap(idct->pipe, idct->tex_transfer);
   idct->pipe->transfer_destroy(idct->pipe, idct->tex_transfer);
}

static bool
init_shaders(struct vl_idct *idct)
{
   idct->vs = create_vert_shader(idct);
   idct->transpose_fs = create_transpose_frag_shader(idct);
   idct->matrix_fs = create_matrix_frag_shader(idct);
   idct->eb_fs = create_empty_block_frag_shader(idct);

   return
      idct->vs != NULL &&
      idct->transpose_fs != NULL &&
      idct->matrix_fs != NULL &&
      idct->eb_fs != NULL;
}

static void
cleanup_shaders(struct vl_idct *idct)
{
   idct->pipe->delete_vs_state(idct->pipe, idct->vs);
   idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
   idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
   idct->pipe->delete_fs_state(idct->pipe, idct->eb_fs);
}

static bool
init_buffers(struct vl_idct *idct)
{
   struct pipe_resource template;
   struct pipe_sampler_view sampler_view;
   struct pipe_vertex_element vertex_elems[2];
   unsigned i;

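   /* upper bound on the number of 8x8 blocks the destination can hold */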
   idct->max_blocks =
      align(idct->destination->width0, BLOCK_WIDTH) / BLOCK_WIDTH *
      align(idct->destination->height0, BLOCK_HEIGHT) / BLOCK_HEIGHT *
      idct->destination->depth0;

   memset(&template, 0, sizeof(struct pipe_resource));
   template.target = PIPE_TEXTURE_2D;
   template.format = PIPE_FORMAT_R32_FLOAT;
   template.last_level = 0;
   template.width0 = 8;
   template.height0 = 8;
   template.depth0 = 1;
   template.usage = PIPE_USAGE_IMMUTABLE;
   template.bind = PIPE_BIND_SAMPLER_VIEW;
   template.flags = 0;

   idct->textures.individual.matrix = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
   idct->textures.individual.transpose = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   template.format = idct->destination->format;
   template.width0 = idct->destination->width0;
   template.height0 = idct->destination->height0;
   template.depth0 = idct->destination->depth0;
   template.usage = PIPE_USAGE_DYNAMIC;
   idct->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   template.usage = PIPE_USAGE_STATIC;
   idct->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);

   for (i = 0; i < 4; ++i) {
      if(idct->textures.all[i] == NULL)
         return false; /* a texture failed to allocate */

      u_sampler_view_default_template(&sampler_view, idct->textures.all[i], idct->textures.all[i]->format);
      idct->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, idct->textures.all[i], &sampler_view);
   }

   idct->vertex_bufs.individual.quad.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.quad.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.quad.buffer_offset = 0;
   idct->vertex_bufs.individual.quad.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.quad.buffer == NULL)
      return false;

   idct->vertex_bufs.individual.pos.stride = sizeof(struct vertex2f);
   idct->vertex_bufs.individual.pos.max_index = 4 * idct->max_blocks - 1;
   idct->vertex_bufs.individual.pos.buffer_offset = 0;
   idct->vertex_bufs.individual.pos.buffer = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      sizeof(struct vertex2f) * 4 * idct->max_blocks
   );

   if(idct->vertex_bufs.individual.pos.buffer == NULL)
      return false;

   /* Rect element */
   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;

   /* Pos element */
   vertex_elems[1].src_offset = 0;
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 1;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;

   idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);

   idct->vs_const_buf = pipe_buffer_create
   (
      idct->pipe->screen,
      PIPE_BIND_CONSTANT_BUFFER,
      sizeof(struct vertex_shader_consts)
   );

   if(idct->vs_const_buf == NULL)
      return false;

   return true;
}

static void
cleanup_buffers(struct vl_idct *idct)
{
   unsigned i;

   assert(idct);

   pipe_resource_reference(&idct->vs_const_buf, NULL);

   for (i = 0; i < 4; ++i) {
      pipe_sampler_view_reference(&idct->sampler_views.all[i], NULL);
      pipe_resource_reference(&idct->textures.all[i], NULL);
   }

   idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
   pipe_resource_reference(&idct->vertex_bufs.individual.quad.buffer, NULL);
   pipe_resource_reference(&idct->vertex_bufs.individual.pos.buffer, NULL);
}

static void
init_constants(struct vl_idct *idct)
{
   struct pipe_transfer *buf_transfer;
   struct vertex_shader_consts *vs_consts;
   struct vertex2f *v;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      BLOCK_WIDTH,
      BLOCK_HEIGHT,
      1
   };

   unsigned i, j, pitch;

   /* quad vectors */
   v = pipe_buffer_map
   (
      idct->pipe,
      idct->vertex_bufs.individual.quad.buffer,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &buf_transfer
   );
   for ( i = 0; i < idct->max_blocks; ++i)
      memcpy(v + i * 4, &const_quad, sizeof(const_quad));
   pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.quad.buffer, buf_transfer);

   /* transposed matrix */
   buf_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.transpose,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / util_format_get_blocksize(buf_transfer->resource->format);

   f = idct->pipe->transfer_map(idct->pipe, buf_transfer);
   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch + j] = const_matrix[j][i]; // transpose

   idct->pipe->transfer_unmap(idct->pipe, buf_transfer);
   idct->pipe->transfer_destroy(idct->pipe, buf_transfer);

   /* matrix */
   buf_transfer = idct->pipe->get_transfer
   (
      idct->pipe, idct->textures.individual.matrix,
      u_subresource(0, 0),
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );
   pitch = buf_transfer->stride / util_format_get_blocksize(buf_transfer->resource->format);

   f = idct->pipe->transfer_map(idct->pipe, buf_transfer);
   for(i = 0; i < BLOCK_HEIGHT; ++i)
      for(j = 0; j < BLOCK_WIDTH; ++j)
         f[i * pitch + j] = const_matrix[i][j];

   idct->pipe->transfer_unmap(idct->pipe, buf_transfer);
   idct->pipe->transfer_destroy(idct->pipe, buf_transfer);

   /* normalisation constants */
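   /*
    * norm holds the reciprocal of the destination size; the vertex shader
    * multiplies it by the block size to map block coordinates into
    * normalized texture space.
    */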
   vs_consts = pipe_buffer_map
   (
      idct->pipe, idct->vs_const_buf,
      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &buf_transfer
   );

   vs_consts->norm.x = 1.0f / idct->destination->width0;
   vs_consts->norm.y = 1.0f / idct->destination->height0;

   pipe_buffer_unmap(idct->pipe, idct->vs_const_buf, buf_transfer);
}

static void
init_state(struct vl_idct *idct)
{
   struct pipe_sampler_state sampler;
   unsigned i;

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;

   idct->viewport.scale[0] = idct->destination->width0;
   idct->viewport.scale[1] = idct->destination->height0;
   idct->viewport.scale[2] = 1;
   idct->viewport.scale[3] = 1;
   idct->viewport.translate[0] = 0;
   idct->viewport.translate[1] = 0;
   idct->viewport.translate[2] = 0;
   idct->viewport.translate[3] = 0;

   idct->fb_state.width = idct->destination->width0;
   idct->fb_state.height = idct->destination->height0;
   idct->fb_state.nr_cbufs = 1;
   idct->fb_state.zsbuf = NULL;

   for (i = 0; i < 4; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      /*sampler.shadow_ambient = ; */
      /*sampler.lod_bias = ; */
      sampler.min_lod = 0;
      /*sampler.max_lod = ; */
      /*sampler.border_color[0] = ; */
      /*sampler.max_anisotropy = ; */
      idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
   }
}

static void
cleanup_state(struct vl_idct *idct)
{
   unsigned i;

   for (i = 0; i < 4; ++i)
      idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
}

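/*
 * Public entry point: sets up render state, shaders and buffers for the
 * given destination, uploads the constant matrices and leaves the transfer
 * buffers mapped so blocks can be added right away.
 */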
bool
vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe, struct pipe_resource *dst)
{
   assert(idct && pipe && dst);

   idct->pipe = pipe;
   pipe_resource_reference(&idct->destination, dst);

   init_state(idct);

   if(!init_shaders(idct))
      return false;

   if(!init_buffers(idct)) {
      cleanup_shaders(idct);
      return false;
   }

   idct->surfaces.intermediate = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   idct->surfaces.destination = idct->pipe->screen->get_tex_surface(
      idct->pipe->screen, idct->destination, 0, 0, 0,
      PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);

   init_constants(idct);
   xfer_buffers_map(idct);

   return true;
}

void
vl_idct_cleanup(struct vl_idct *idct)
{
   idct->pipe->screen->tex_surface_destroy(idct->surfaces.destination);
   idct->pipe->screen->tex_surface_destroy(idct->surfaces.intermediate);

   cleanup_shaders(idct);
   cleanup_buffers(idct);

   cleanup_state(idct);

   pipe_resource_reference(&idct->destination, NULL);
}

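/*
 * Queue one 8x8 block at block position (x, y). A non-NULL block has its
 * 16 bit coefficients copied into the mapped source texture and its quad
 * appended at the front of the position buffer; a NULL block counts as
 * empty and its quad is placed at the tail, so all empty blocks can later
 * be drawn with a single call using the zero-fill shader.
 */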
void
vl_idct_add_block(struct vl_idct *idct, unsigned x, unsigned y, short *block)
{
   struct vertex2f v, *v_dst;

   unsigned tex_pitch;
   short *texels;

   unsigned i;

   assert(idct);

   if(block) {
      tex_pitch = idct->tex_transfer->stride / util_format_get_blocksize(idct->tex_transfer->resource->format);
      texels = idct->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;

      for (i = 0; i < BLOCK_HEIGHT; ++i)
         memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * 2);

      /* non-empty blocks fill the vector buffer from left to right */
      v_dst = idct->vectors + idct->num_blocks * 4;

      idct->num_blocks++;

   } else {

      /* while empty blocks fill it from right to left */
      v_dst = idct->vectors + (idct->max_blocks - idct->num_empty_blocks) * 4 - 4;

      idct->num_empty_blocks++;
   }

   v.x = x;
   v.y = y;

   for (i = 0; i < 4; ++i) {
      v_dst[i] = v;
   }
}

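/*
 * Draw everything queued since the last flush: the first pass transforms
 * the non-empty blocks into the intermediate texture, the second pass
 * transforms them again into the destination, and a final draw zero-fills
 * the empty blocks directly in the destination. The block counters are
 * then reset and the transfer buffers are mapped again for the next batch.
 */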
void
vl_idct_flush(struct vl_idct *idct)
{
   xfer_buffers_unmap(idct);

   idct->pipe->set_constant_buffer(idct->pipe, PIPE_SHADER_VERTEX, 0, idct->vs_const_buf);

   if(idct->num_blocks > 0) {

      /* first stage */
      idct->fb_state.cbufs[0] = idct->surfaces.intermediate;
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[0]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);

      /* second stage */
      idct->fb_state.cbufs[0] = idct->surfaces.destination;
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[1]);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);
   }

   if(idct->num_empty_blocks > 0) {

      /* empty block handling */
      idct->fb_state.cbufs[0] = idct->surfaces.destination;
      idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
      idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);

      idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
      idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
      idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all);
      idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all);
      idct->pipe->bind_vs_state(idct->pipe, idct->vs);
      idct->pipe->bind_fs_state(idct->pipe, idct->eb_fs);

      util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS,
         (idct->max_blocks - idct->num_empty_blocks) * 4,
         idct->num_empty_blocks * 4);
   }

   idct->num_blocks = 0;
   idct->num_empty_blocks = 0;
   xfer_buffers_map(idct);
}