vl_mc.c revision 9937e85bccbf2f6bd77d061ab0488d45e9366f10
/**************************************************************************
 *
 * Copyright 2009 Younes Manton.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include <pipe/p_context.h>

#include <util/u_sampler.h>
#include <util/u_draw.h>

#include <tgsi/tgsi_ureg.h>

#include "vl_defines.h"
#include "vl_vertex_buffers.h"
#include "vl_mc.h"
#include "vl_idct.h"

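/* Vertex shader output slots. The reference shaders use VS_O_VTOP/VS_O_VBOTTOM
 * for the per-field texture coordinates, while the YCbCr shaders reuse the same
 * slots as VS_O_FLAGS and VS_O_VTEX. */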
enum VS_OUTPUT
{
   VS_O_VPOS,
   VS_O_VTOP,
   VS_O_VBOTTOM,

   VS_O_FLAGS = VS_O_VTOP,
   VS_O_VTEX = VS_O_VBOTTOM
};

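/* Scale the per-macroblock grid position (vpos + vrect) into render target
 * coordinates, write it to the position output and return it in a temporary
 * for further use by the caller. */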
static struct ureg_dst
calc_position(struct vl_mc *r, struct ureg_program *shader, struct ureg_src block_scale)
{
   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_vpos = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   /*
    * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
    *
    * t_vpos = (vpos + vrect) * block_scale
    * o_vpos.xy = t_vpos
    * o_vpos.zw = 1.0f
    */
   ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), block_scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));

   return t_vpos;
}

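/* Compute a top/bottom field flag from the fragment's window space y
 * coordinate: tmp.y is 0 on even lines and 1 on odd lines. */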
static struct ureg_dst
calc_line(struct ureg_program *shader)
{
   struct ureg_dst tmp;
   struct ureg_src pos;

   tmp = ureg_DECL_temporary(shader);

   pos = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS, TGSI_INTERPOLATE_LINEAR);

   /*
    * tmp.y = fraction(pos.y / 2) >= 0.5 ? 1 : 0
    */
   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), pos, ureg_imm1f(shader, 0.5f));
   ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(tmp));
   ureg_SGE(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(tmp), ureg_imm1f(shader, 0.5f));

   return tmp;
}

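/* Vertex shader for rendering motion compensated reference blocks: computes
 * the macroblock position and applies the top and bottom field motion
 * vectors, emitting one texture coordinate per field. */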
static void *
create_ref_vert_shader(struct vl_mc *r)
{
   struct ureg_program *shader;
   struct ureg_src mv_scale;
   struct ureg_src vrect, vmv[2];
   struct ureg_dst t_vpos;
   struct ureg_dst o_vpos, o_vmv[2];
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vmv[0] = ureg_DECL_vs_input(shader, VS_I_MV_TOP);
   vmv[1] = ureg_DECL_vs_input(shader, VS_I_MV_BOTTOM);

   t_vpos = calc_position(r, shader, ureg_imm2f(shader,
      (float)MACROBLOCK_WIDTH / r->buffer_width,
      (float)MACROBLOCK_HEIGHT / r->buffer_height)
   );

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_vmv[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
   o_vmv[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);

   /*
    * mv_scale.xy = 0.5 / (dst.width, dst.height);
    * mv_scale.z = 1.0f / 4.0f
    * mv_scale.w = 1.0f / 255.0f
    *
    * // Apply motion vectors
    * o_vmv[0..1].xy = vmv[0..1] * mv_scale + t_vpos
    * o_vmv[0..1].zw = vmv[0..1] * mv_scale
    *
    */

   mv_scale = ureg_imm4f(shader,
      0.5f / r->buffer_width,
      0.5f / r->buffer_height,
      1.0f / 4.0f,
      1.0f / PIPE_VIDEO_MV_WEIGHT_MAX);

   for (i = 0; i < 2; ++i) {
      ureg_MAD(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_XY), mv_scale, vmv[i], ureg_src(t_vpos));
      ureg_MUL(shader, ureg_writemask(o_vmv[i], TGSI_WRITEMASK_ZW), mv_scale, vmv[i]);
   }

   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

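/* Fragment shader for reference blocks: selects the top or bottom field
 * texture coordinate per line, adjusts it for field based prediction when
 * needed, and samples the reference picture. */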
static void *
create_ref_frag_shader(struct vl_mc *r)
{
   const float y_scale =
      r->buffer_height / 2 *
      r->macroblock_size / MACROBLOCK_HEIGHT;

   struct ureg_program *shader;
   struct ureg_src tc[2], sampler;
   struct ureg_dst ref, field;
   struct ureg_dst fragment;
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);

   sampler = ureg_DECL_sampler(shader, 0);
   ref = ureg_DECL_temporary(shader);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   field = calc_line(shader);

   /*
    * ref = field.y ? tc[1] : tc[0]
    *
    * // Adjust tc according to top/bottom field selection
    * if (|ref.z|) {
    *    ref.y *= y_scale
    *    ref.y = floor(ref.y)
    *    ref.y += ref.z
    *    ref.y /= y_scale
    * }
    * fragment.xyz = tex(ref, sampler[0])
    */
   ureg_CMP(shader, ureg_writemask(ref, TGSI_WRITEMASK_XYZ),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);
   ureg_CMP(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W),
            ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
            tc[1], tc[0]);

   ureg_IF(shader, ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z), &label);

      ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
               ureg_src(ref), ureg_imm1f(shader, y_scale));
      ureg_FLR(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y), ureg_src(ref));
      ureg_ADD(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
               ureg_src(ref), ureg_scalar(ureg_src(ref), TGSI_SWIZZLE_Z));
      ureg_MUL(shader, ureg_writemask(ref, TGSI_WRITEMASK_Y),
               ureg_src(ref), ureg_imm1f(shader, 1.0f / y_scale));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), TGSI_TEXTURE_2D, ureg_src(ref), sampler);

   ureg_release_temporary(shader, ref);

   ureg_release_temporary(shader, field);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

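/* Vertex shader for rendering the YCbCr blocks: positions the block, passes
 * the intra flag in o_flags.z and, for field coded macroblocks, offsets the
 * position and sets o_flags.w so the fragment shader can select the field. */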
static void *
create_ycbcr_vert_shader(struct vl_mc *r, vl_mc_ycbcr_vert_shader vs_callback, void *callback_priv)
{
   struct ureg_program *shader;

   struct ureg_src vrect, vpos;
   struct ureg_dst t_vpos, t_vtex;
   struct ureg_dst o_vpos, o_flags;

   struct vertex2f scale = {
      (float)BLOCK_WIDTH / r->buffer_width * MACROBLOCK_WIDTH / r->macroblock_size,
      (float)BLOCK_HEIGHT / r->buffer_height * MACROBLOCK_HEIGHT / r->macroblock_size
   };

   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);

   t_vpos = calc_position(r, shader, ureg_imm2f(shader, scale.x, scale.y));
   t_vtex = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_flags = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_FLAGS);

   /*
    * o_vtex.xy = t_vpos
    * o_flags.z = intra * 0.5
    *
    * if(interlaced) {
    *    t_vtex.xy = vrect.y ? { 0, scale.y } : { -scale.y, 0 }
    *    t_vtex.z = vpos.y % 2
    *    t_vtex.y = t_vtex.z ? t_vtex.x : t_vtex.y
    *    o_vpos.y = t_vtex.y + t_vpos.y
    *
    *    o_flags.w = t_vtex.z ? 0 : 1
    * }
    *
    */

   vs_callback(callback_priv, r, shader, VS_O_VTEX, t_vpos);

   ureg_MUL(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_Z),
            ureg_scalar(vpos, TGSI_SWIZZLE_Z), ureg_imm1f(shader, 0.5f));
   ureg_MOV(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_W), ureg_imm1f(shader, -1.0f));

   if (r->macroblock_size == MACROBLOCK_HEIGHT) { //TODO
      ureg_IF(shader, ureg_scalar(vpos, TGSI_SWIZZLE_W), &label);

         ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY),
                  ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_Y)),
                  ureg_imm2f(shader, 0.0f, scale.y),
                  ureg_imm2f(shader, -scale.y, 0.0f));
         ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z),
                  ureg_scalar(vpos, TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.5f));

         ureg_FRC(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Z), ureg_src(t_vtex));

         ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y),
                  ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)),
                  ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_X),
                  ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Y));
         ureg_ADD(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_Y),
                  ureg_src(t_vpos), ureg_src(t_vtex));

         ureg_CMP(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_W),
                  ureg_negate(ureg_scalar(ureg_src(t_vtex), TGSI_SWIZZLE_Z)),
                  ureg_imm1f(shader, 0.0f), ureg_imm1f(shader, 1.0f));

      ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
      ureg_ENDIF(shader);
   }

   ureg_release_temporary(shader, t_vtex);
   ureg_release_temporary(shader, t_vpos);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

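/* Fragment shader for the YCbCr blocks: kills fragments that belong to the
 * other field, otherwise fetches the block data through the callback, applies
 * the optional scale and adds the bias passed in the flags input. */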
static void *
create_ycbcr_frag_shader(struct vl_mc *r, float scale, vl_mc_ycbcr_frag_shader fs_callback, void *callback_priv)
{
   struct ureg_program *shader;
   struct ureg_src flags;
   struct ureg_dst tmp;
   struct ureg_dst fragment;
   unsigned label;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   flags = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_FLAGS, TGSI_INTERPOLATE_LINEAR);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   tmp = calc_line(shader);

   /*
    * if (field == tc.w)
    *    kill();
    * else {
    *    fragment.xyz = tex(tc, sampler) * scale + tc.z
    *    fragment.w = 1.0f
    * }
    */

   ureg_SEQ(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
            ureg_scalar(flags, TGSI_SWIZZLE_W), ureg_src(tmp));

   ureg_IF(shader, ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), &label);

      ureg_KILP(shader);

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ELSE(shader, &label);

      fs_callback(callback_priv, r, shader, VS_O_VTEX, tmp);

      if (scale != 1.0f)
         ureg_MAD(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
                  ureg_src(tmp), ureg_imm1f(shader, scale),
                  ureg_scalar(flags, TGSI_SWIZZLE_Z));
      else
         ureg_ADD(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
                  ureg_src(tmp), ureg_scalar(flags, TGSI_SWIZZLE_Z));

      ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
   ureg_ENDIF(shader);

   ureg_release_temporary(shader, tmp);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, r->pipe);
}

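/* Create the CSO objects shared by all buffers: the reference sampler, one
 * "clear" and one "add" blend state per color write mask, and the rasterizer
 * state. */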
static bool
init_pipe_state(struct vl_mc *r)
{
   struct pipe_sampler_state sampler;
   struct pipe_blend_state blend;
   struct pipe_rasterizer_state rs_state;
   unsigned i;

   assert(r);

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;
   r->sampler_ref = r->pipe->create_sampler_state(r->pipe, &sampler);
   if (!r->sampler_ref)
      goto error_sampler_ref;

   for (i = 0; i < VL_MC_NUM_BLENDERS; ++i) {
      memset(&blend, 0, sizeof blend);
      blend.independent_blend_enable = 0;
      blend.rt[0].blend_enable = 1;
      blend.rt[0].rgb_func = PIPE_BLEND_ADD;
      blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
      blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
      blend.rt[0].alpha_func = PIPE_BLEND_ADD;
      blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
      blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
      blend.logicop_enable = 0;
      blend.logicop_func = PIPE_LOGICOP_CLEAR;
      blend.rt[0].colormask = i;
      blend.dither = 0;
      r->blend_clear[i] = r->pipe->create_blend_state(r->pipe, &blend);
      if (!r->blend_clear[i])
         goto error_blend;

      blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
      blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
      r->blend_add[i] = r->pipe->create_blend_state(r->pipe, &blend);
      if (!r->blend_add[i])
         goto error_blend;
   }

   memset(&rs_state, 0, sizeof(rs_state));
   /*rs_state.sprite_coord_enable */
   rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
   rs_state.point_quad_rasterization = true;
   rs_state.point_size = BLOCK_WIDTH;
   rs_state.gl_rasterization_rules = true;
   r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
   if (!r->rs_state)
      goto error_rs_state;

   return true;

error_rs_state:
error_blend:
   for (i = 0; i < VL_MC_NUM_BLENDERS; ++i) {
      if (r->blend_add[i])
         r->pipe->delete_blend_state(r->pipe, r->blend_add[i]);

      if (r->blend_clear[i])
         r->pipe->delete_blend_state(r->pipe, r->blend_clear[i]);
   }

   r->pipe->delete_sampler_state(r->pipe, r->sampler_ref);

error_sampler_ref:
   return false;
}

static void
cleanup_pipe_state(struct vl_mc *r)
{
   unsigned i;

   assert(r);

   r->pipe->delete_sampler_state(r->pipe, r->sampler_ref);
   for (i = 0; i < VL_MC_NUM_BLENDERS; ++i) {
      r->pipe->delete_blend_state(r->pipe, r->blend_clear[i]);
      r->pipe->delete_blend_state(r->pipe, r->blend_add[i]);
   }
   r->pipe->delete_rasterizer_state(r->pipe, r->rs_state);
}

bool
vl_mc_init(struct vl_mc *renderer, struct pipe_context *pipe,
           unsigned buffer_width, unsigned buffer_height,
           unsigned macroblock_size, float scale,
           vl_mc_ycbcr_vert_shader vs_callback,
           vl_mc_ycbcr_frag_shader fs_callback,
           void *callback_priv)
{
   assert(renderer);
   assert(pipe);

   memset(renderer, 0, sizeof(struct vl_mc));

   renderer->pipe = pipe;
   renderer->buffer_width = buffer_width;
   renderer->buffer_height = buffer_height;
   renderer->macroblock_size = macroblock_size;

   if (!init_pipe_state(renderer))
      goto error_pipe_state;

   renderer->vs_ref = create_ref_vert_shader(renderer);
   if (!renderer->vs_ref)
      goto error_vs_ref;

   renderer->vs_ycbcr = create_ycbcr_vert_shader(renderer, vs_callback, callback_priv);
   if (!renderer->vs_ycbcr)
      goto error_vs_ycbcr;

   renderer->fs_ref = create_ref_frag_shader(renderer);
   if (!renderer->fs_ref)
      goto error_fs_ref;

   renderer->fs_ycbcr = create_ycbcr_frag_shader(renderer, scale, fs_callback, callback_priv);
   if (!renderer->fs_ycbcr)
      goto error_fs_ycbcr;

   return true;

error_fs_ycbcr:
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ref);

error_fs_ref:
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ycbcr);

error_vs_ycbcr:
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ref);

error_vs_ref:
   cleanup_pipe_state(renderer);

error_pipe_state:
   return false;
}

void
vl_mc_cleanup(struct vl_mc *renderer)
{
   assert(renderer);

   cleanup_pipe_state(renderer);

   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ref);
   renderer->pipe->delete_vs_state(renderer->pipe, renderer->vs_ycbcr);
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ref);
   renderer->pipe->delete_fs_state(renderer->pipe, renderer->fs_ycbcr);
}

bool
vl_mc_init_buffer(struct vl_mc *renderer, struct vl_mc_buffer *buffer)
{
   assert(renderer && buffer);

   buffer->renderer = renderer;

   buffer->viewport.scale[2] = 1;
   buffer->viewport.scale[3] = 1;
   buffer->viewport.translate[0] = 0;
   buffer->viewport.translate[1] = 0;
   buffer->viewport.translate[2] = 0;
   buffer->viewport.translate[3] = 0;

   buffer->fb_state.nr_cbufs = 1;
   buffer->fb_state.zsbuf = NULL;

   return true;
}

void
vl_mc_cleanup_buffer(struct vl_mc_buffer *buffer)
{
   assert(buffer);
}

void
vl_mc_set_surface(struct vl_mc_buffer *buffer, struct pipe_surface *surface)
{
   assert(buffer && surface);

   buffer->surface_cleared = false;

   buffer->viewport.scale[0] = surface->width;
   buffer->viewport.scale[1] = surface->height;

   buffer->fb_state.width = surface->width;
   buffer->fb_state.height = surface->height;
   buffer->fb_state.cbufs[0] = surface;
}

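/* Bind the common state for a rendering pass: the rasterizer, the blend state
 * matching the color mask (overwriting the surface on the first pass,
 * accumulating afterwards), the framebuffer and the viewport. */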
static void
prepare_pipe_4_rendering(struct vl_mc_buffer *buffer, unsigned mask)
{
   struct vl_mc *renderer;

   assert(buffer);

   renderer = buffer->renderer;
   renderer->pipe->bind_rasterizer_state(renderer->pipe, renderer->rs_state);

   if (buffer->surface_cleared)
      renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_add[mask]);
   else
      renderer->pipe->bind_blend_state(renderer->pipe, renderer->blend_clear[mask]);

   renderer->pipe->set_framebuffer_state(renderer->pipe, &buffer->fb_state);
   renderer->pipe->set_viewport_state(renderer->pipe, &buffer->viewport);
}

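/* Render the motion compensated reference prediction, drawing one quad per
 * macroblock over the whole surface, and mark the surface as initialized. */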
void
vl_mc_render_ref(struct vl_mc_buffer *buffer, struct pipe_sampler_view *ref)
{
   struct vl_mc *renderer;

   assert(buffer && ref);

   prepare_pipe_4_rendering(buffer, PIPE_MASK_R | PIPE_MASK_G | PIPE_MASK_B);

   renderer = buffer->renderer;

   renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs_ref);
   renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ref);

   renderer->pipe->set_fragment_sampler_views(renderer->pipe, 1, &ref);
   renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 1, &renderer->sampler_ref);

   util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0,
                              renderer->buffer_width / MACROBLOCK_WIDTH *
                              renderer->buffer_height / MACROBLOCK_HEIGHT);

   buffer->surface_cleared = true;
}

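/* Render the YCbCr block data for a single color component, blending it onto
 * the current surface content. */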
void
vl_mc_render_ycbcr(struct vl_mc_buffer *buffer, unsigned component, unsigned num_instances)
{
   struct vl_mc *renderer;

   assert(buffer);

   if (num_instances == 0)
      return;

   prepare_pipe_4_rendering(buffer, 1 << component);

   renderer = buffer->renderer;

   renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs_ycbcr);
   renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs_ycbcr);

   util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
}
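
/*
 * Usage sketch (not part of this file; a minimal illustration only, assuming
 * a valid pipe_context "pipe", a destination surface "dst", a reference
 * picture sampler view "ref", and hypothetical "vs_callback", "fs_callback",
 * "callback_priv" and "num_ycbcr_blocks" supplied by the caller, e.g. an
 * IDCT stage):
 *
 *    struct vl_mc mc;
 *    struct vl_mc_buffer buf;
 *
 *    vl_mc_init(&mc, pipe, dst->width, dst->height, MACROBLOCK_HEIGHT, 1.0f,
 *               vs_callback, fs_callback, callback_priv);
 *    vl_mc_init_buffer(&mc, &buf);
 *    vl_mc_set_surface(&buf, dst);
 *
 *    vl_mc_render_ref(&buf, ref);                    // reference prediction
 *    vl_mc_render_ycbcr(&buf, 0, num_ycbcr_blocks);  // add YCbCr block data
 *
 *    vl_mc_cleanup_buffer(&buf);
 *    vl_mc_cleanup(&mc);
 */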
634