vl_compositor.c revision 49967950a56de276ffcbaea80acbc9f5bd3207bc
/**************************************************************************
 *
 * Copyright 2009 Younes Manton.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include <pipe/p_compiler.h>
#include <pipe/p_context.h>

#include <util/u_memory.h>
#include <util/u_draw.h>
#include <util/u_surface.h>

#include <tgsi/tgsi_ureg.h>

#include "vl_csc.h"
#include "vl_types.h"
#include "vl_compositor.h"

typedef float csc_matrix[16];

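/* Pass-through vertex shader: the position and texture coordinates generated
 * by gen_rect_verts() are forwarded to the rasterizer unchanged. */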
static void *
create_vert_shader(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src vpos, vtex;
   struct ureg_dst o_vpos, o_vtex;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vpos = ureg_DECL_vs_input(shader, 0);
   vtex = ureg_DECL_vs_input(shader, 1);
   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
   o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, 1);

   /*
    * o_vpos = vpos
    * o_vtex = vtex
    */
   ureg_MOV(shader, o_vpos, vpos);
   ureg_MOV(shader, o_vtex, vtex);

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

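/* Fragment shader for pipe_video_buffer layers: samples the Y, Cb and Cr
 * components from their respective sampler views and converts the result
 * to RGB with the CSC matrix bound as constant buffer 0. */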
static void *
create_frag_shader_video_buffer(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src tc;
   struct ureg_src csc[3];
   struct ureg_src sampler[3];
   struct ureg_dst texel;
   struct ureg_dst fragment;
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
   for (i = 0; i < 3; ++i) {
      csc[i] = ureg_DECL_constant(shader, i);
      sampler[i] = ureg_DECL_sampler(shader, i);
   }
   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * texel.xyz = tex(tc, sampler[i])
    * fragment = csc * texel
    */
   for (i = 0; i < 3; ++i)
      ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D, tc, sampler[i]);

   ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));

   ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   ureg_release_temporary(shader, texel);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

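/* Fragment shader for paletted layers (e.g. XvMC subpictures): the first
 * sampler yields a palette index, which is then used as the coordinate for
 * a lookup into the 1D palette texture before the CSC matrix is applied.
 * Alpha is taken from the index texel itself. */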
static void *
create_frag_shader_palette(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src csc[3];
   struct ureg_src tc;
   struct ureg_src sampler;
   struct ureg_src palette;
   struct ureg_dst texel;
   struct ureg_dst fragment;
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   for (i = 0; i < 3; ++i)
      csc[i] = ureg_DECL_constant(shader, i);

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
   sampler = ureg_DECL_sampler(shader, 0);
   palette = ureg_DECL_sampler(shader, 1);
   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * texel = tex(tc, sampler)
    * fragment.xyz = tex(texel, palette) * csc
    * fragment.a = texel.a
    */
   ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
   ureg_MUL(shader, ureg_writemask(texel, TGSI_WRITEMASK_X), ureg_src(texel), ureg_imm1f(shader, 15.0f / 16.0f));
   ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(texel));

   ureg_TEX(shader, texel, TGSI_TEXTURE_1D, ureg_src(texel), palette);

   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));

   ureg_release_temporary(shader, texel);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

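/* Fragment shader for RGBA layers: a plain textured copy, no color space
 * conversion needed. */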
static void *
create_frag_shader_rgba(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src tc;
   struct ureg_src sampler;
   struct ureg_dst fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
   sampler = ureg_DECL_sampler(shader, 0);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * fragment = tex(tc, sampler)
    */
   ureg_TEX(shader, fragment, TGSI_TEXTURE_2D, tc, sampler);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

static bool
init_shaders(struct vl_compositor *c)
{
   assert(c);

   c->vs = create_vert_shader(c);
   if (!c->vs) {
      debug_printf("Unable to create vertex shader.\n");
      return false;
   }

   c->fs_video_buffer = create_frag_shader_video_buffer(c);
   if (!c->fs_video_buffer) {
      debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_palette = create_frag_shader_palette(c);
   if (!c->fs_palette) {
      debug_printf("Unable to create Palette-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_rgba = create_frag_shader_rgba(c);
   if (!c->fs_rgba) {
      debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
      return false;
   }

   return true;
}

static void cleanup_shaders(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vs_state(c->pipe, c->vs);
   c->pipe->delete_fs_state(c->pipe, c->fs_video_buffer);
   c->pipe->delete_fs_state(c->pipe, c->fs_palette);
   c->pipe->delete_fs_state(c->pipe, c->fs_rgba);
}

static bool
init_pipe_state(struct vl_compositor *c)
{
   struct pipe_rasterizer_state rast;
   struct pipe_sampler_state sampler;
   struct pipe_blend_state blend;

   assert(c);

   c->fb_state.nr_cbufs = 1;
   c->fb_state.zsbuf = NULL;

   c->viewport.scale[2] = 1;
   c->viewport.scale[3] = 1;
   c->viewport.translate[0] = 0;
   c->viewport.translate[1] = 0;
   c->viewport.translate[2] = 0;
   c->viewport.translate[3] = 0;

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;

   c->sampler_linear = c->pipe->create_sampler_state(c->pipe, &sampler);

   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   c->sampler_nearest = c->pipe->create_sampler_state(c->pipe, &sampler);

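   /* Blend layers over the current frame buffer contents using standard
    * (non-premultiplied) source-alpha blending; alpha accumulates additively. */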
   memset(&blend, 0, sizeof blend);
   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 1;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   c->blend = c->pipe->create_blend_state(c->pipe, &blend);

   memset(&rast, 0, sizeof rast);
   rast.flatshade = 1;
   rast.front_ccw = 1;
   rast.cull_face = PIPE_FACE_NONE;
   rast.fill_back = PIPE_POLYGON_MODE_FILL;
   rast.fill_front = PIPE_POLYGON_MODE_FILL;
   rast.scissor = 1;
   rast.line_width = 1;
   rast.point_size_per_vertex = 1;
   rast.offset_units = 1;
   rast.offset_scale = 1;
   rast.gl_rasterization_rules = 1;

   c->rast = c->pipe->create_rasterizer_state(c->pipe, &rast);

   return true;
}

static void cleanup_pipe_state(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_sampler_state(c->pipe, c->sampler_linear);
   c->pipe->delete_sampler_state(c->pipe, c->sampler_nearest);
   c->pipe->delete_blend_state(c->pipe, c->blend);
   c->pipe->delete_rasterizer_state(c->pipe, c->rast);
}

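/* (Re)allocate the vertex buffer: room for one quad (4 vertices) per
 * possible layer, created with stream usage since it is rewritten on
 * every draw. */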
static bool
create_vertex_buffer(struct vl_compositor *c)
{
   assert(c);

   pipe_resource_reference(&c->vertex_buf.buffer, NULL);
   c->vertex_buf.buffer = pipe_buffer_create
   (
      c->pipe->screen,
      PIPE_BIND_VERTEX_BUFFER,
      PIPE_USAGE_STREAM,
      sizeof(struct vertex4f) * VL_COMPOSITOR_MAX_LAYERS * 4
   );
   return c->vertex_buf.buffer != NULL;
}

static bool
init_buffers(struct vl_compositor *c)
{
   struct pipe_vertex_element vertex_elems[2];

   assert(c);

   /*
    * Create our vertex buffer and vertex buffer elements
    */
   c->vertex_buf.stride = sizeof(struct vertex4f);
   c->vertex_buf.buffer_offset = 0;
   if (!create_vertex_buffer(c))
      return false;

   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
   vertex_elems[1].src_offset = sizeof(struct vertex2f);
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 0;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
   c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 2, vertex_elems);

   /*
    * Create our fragment shader's constant buffer
    * Const buffer contains the color conversion matrix and bias vectors
    */
   /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
   c->csc_matrix = pipe_buffer_create
   (
      c->pipe->screen,
      PIPE_BIND_CONSTANT_BUFFER,
      PIPE_USAGE_STATIC,
      sizeof(csc_matrix)
   );

   return c->csc_matrix != NULL;
}

static void
cleanup_buffers(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
   pipe_resource_reference(&c->vertex_buf.buffer, NULL);
   pipe_resource_reference(&c->csc_matrix, NULL);
}

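/* Helpers for turning pipe_video_rects into the normalized [0..1]
 * coordinates used for both vertex positions and texture coordinates;
 * default_rect() covers the whole of the layer's first texture. */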
static INLINE struct pipe_video_rect
default_rect(struct vl_compositor_layer *layer)
{
   struct pipe_resource *res = layer->sampler_views[0]->texture;
   struct pipe_video_rect rect = { 0, 0, res->width0, res->height0 };
   return rect;
}

static INLINE struct vertex2f
calc_topleft(struct vertex2f size, struct pipe_video_rect rect)
{
   struct vertex2f res = { rect.x / size.x, rect.y / size.y };
   return res;
}

static INLINE struct vertex2f
calc_bottomright(struct vertex2f size, struct pipe_video_rect rect)
{
   struct vertex2f res = { (rect.x + rect.w) / size.x, (rect.y + rect.h) / size.y };
   return res;
}

static INLINE void
calc_src_and_dst(struct vl_compositor_layer *layer, unsigned width, unsigned height,
                 struct pipe_video_rect src, struct pipe_video_rect dst)
{
   struct vertex2f size = { width, height };

   layer->src.tl = calc_topleft(size, src);
   layer->src.br = calc_bottomright(size, src);
   layer->dst.tl = calc_topleft(size, dst);
   layer->dst.br = calc_bottomright(size, dst);
}

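/* Emit one quad for a layer: the destination coordinates go into the x/y
 * components of each vertex, the matching source texture coordinates into
 * z/w (see the vertex element layout set up in init_buffers()). */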
static void
gen_rect_verts(struct vertex4f *vb, struct vl_compositor_layer *layer)
{
   assert(vb && layer);

   vb[0].x = layer->dst.tl.x;
   vb[0].y = layer->dst.tl.y;
   vb[0].z = layer->src.tl.x;
   vb[0].w = layer->src.tl.y;

   vb[1].x = layer->dst.br.x;
   vb[1].y = layer->dst.tl.y;
   vb[1].z = layer->src.br.x;
   vb[1].w = layer->src.tl.y;

   vb[2].x = layer->dst.br.x;
   vb[2].y = layer->dst.br.y;
   vb[2].z = layer->src.br.x;
   vb[2].w = layer->src.br.y;

   vb[3].x = layer->dst.tl.x;
   vb[3].y = layer->dst.br.y;
   vb[3].z = layer->src.tl.x;
   vb[3].w = layer->src.br.y;
}

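/* Fill the vertex buffer with one quad per active layer. If a clearing
 * layer completely covers the currently dirty area, the pending clear of
 * the render target can be skipped. */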
static void
gen_vertex_data(struct vl_compositor *c)
{
   struct vertex4f *vb;
   struct pipe_transfer *buf_transfer;
   unsigned i;

   assert(c);

   vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
                        PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD | PIPE_TRANSFER_DONTBLOCK,
                        &buf_transfer);

   if (!vb) {
      // If buffer is still locked from last draw create a new one
      create_vertex_buffer(c);
      vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
                           PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
                           &buf_transfer);
   }

   for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
      if (c->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &c->layers[i];
         gen_rect_verts(vb, layer);
         vb += 4;

         if (layer->clearing &&
             c->dirty_tl.x >= layer->dst.tl.x &&
             c->dirty_tl.y >= layer->dst.tl.y &&
             c->dirty_br.x <= layer->dst.br.x &&
             c->dirty_br.y <= layer->dst.br.y) {

            // We clear the dirty area anyway, no need for clear_render_target
            c->dirty_tl.x = c->dirty_tl.y = 1.0f;
            c->dirty_br.x = c->dirty_br.y = 0.0f;
         }
      }
   }

   pipe_buffer_unmap(c->pipe, buf_transfer);
}

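/* Draw every active layer with its own fragment shader and sampler views,
 * expanding the dirty area to cover everything that was rendered. */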
static void
draw_layers(struct vl_compositor *c)
{
   unsigned vb_index, i;

   assert(c);

   for (i = 0, vb_index = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      if (c->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &c->layers[i];
         struct pipe_sampler_view **samplers = &layer->sampler_views[0];
         unsigned num_sampler_views = !samplers[1] ? 1 : !samplers[2] ? 2 : 3;

         c->pipe->bind_fs_state(c->pipe, layer->fs);
         c->pipe->bind_fragment_sampler_states(c->pipe, num_sampler_views, layer->samplers);
         c->pipe->set_fragment_sampler_views(c->pipe, num_sampler_views, samplers);
         util_draw_arrays(c->pipe, PIPE_PRIM_QUADS, vb_index * 4, 4);
         vb_index++;

         // Remember the currently drawn area as dirty for the next draw command
         c->dirty_tl.x = MIN2(layer->dst.tl.x, c->dirty_tl.x);
         c->dirty_tl.y = MIN2(layer->dst.tl.y, c->dirty_tl.y);
         c->dirty_br.x = MAX2(layer->dst.br.x, c->dirty_br.x);
         c->dirty_br.y = MAX2(layer->dst.br.y, c->dirty_br.y);
      }
   }
}

void
vl_compositor_reset_dirty_area(struct vl_compositor *c)
{
   assert(c);

   c->dirty_tl.x = c->dirty_tl.y = 0.0f;
   c->dirty_br.x = c->dirty_br.y = 1.0f;
}

void
vl_compositor_set_clear_color(struct vl_compositor *c, float color[4])
{
   unsigned i;

   assert(c);

   for (i = 0; i < 4; ++i)
      c->clear_color[i] = color[i];
}

void
vl_compositor_clear_layers(struct vl_compositor *c)
{
   unsigned i, j;

   assert(c);

   c->used_layers = 0;
   for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      c->layers[i].fs = NULL;
      for (j = 0; j < 3; j++)
         pipe_sampler_view_reference(&c->layers[i].sampler_views[j], NULL);
   }
}

void
vl_compositor_cleanup(struct vl_compositor *c)
{
   assert(c);

   vl_compositor_clear_layers(c);

   cleanup_buffers(c);
   cleanup_shaders(c);
   cleanup_pipe_state(c);
}

void
vl_compositor_set_csc_matrix(struct vl_compositor *c, const float matrix[16])
{
   struct pipe_transfer *buf_transfer;

   assert(c);

   memcpy
   (
      pipe_buffer_map(c->pipe, c->csc_matrix,
                      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
                      &buf_transfer),
      matrix,
      sizeof(csc_matrix)
   );

   pipe_buffer_unmap(c->pipe, buf_transfer);
}

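/* Set up a layer that displays a decoded video buffer: one sampler view per
 * color component, converted to RGB by fs_video_buffer. The layer is marked
 * as clearing because it completely overwrites whatever lies beneath it. */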
void
vl_compositor_set_buffer_layer(struct vl_compositor *c,
                               unsigned layer,
                               struct pipe_video_buffer *buffer,
                               struct pipe_video_rect *src_rect,
                               struct pipe_video_rect *dst_rect)
{
   struct pipe_sampler_view **sampler_views;
   unsigned i;

   assert(c && buffer);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   c->used_layers |= 1 << layer;
   c->layers[layer].clearing = true;
   c->layers[layer].fs = c->fs_video_buffer;

   sampler_views = buffer->get_sampler_view_components(buffer);
   for (i = 0; i < 3; ++i) {
      c->layers[layer].samplers[i] = c->sampler_linear;
      pipe_sampler_view_reference(&c->layers[layer].sampler_views[i], sampler_views[i]);
   }

   calc_src_and_dst(&c->layers[layer], buffer->width, buffer->height,
                    src_rect ? *src_rect : default_rect(&c->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&c->layers[layer]));
}

void
vl_compositor_set_palette_layer(struct vl_compositor *c,
                                unsigned layer,
                                struct pipe_sampler_view *indexes,
                                struct pipe_sampler_view *palette,
                                struct pipe_video_rect *src_rect,
                                struct pipe_video_rect *dst_rect)
{
   assert(c && indexes && palette);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   c->used_layers |= 1 << layer;
   c->layers[layer].clearing = false;
   c->layers[layer].fs = c->fs_palette;
   c->layers[layer].samplers[0] = c->sampler_linear;
   c->layers[layer].samplers[1] = c->sampler_nearest;
   c->layers[layer].samplers[2] = NULL;
   pipe_sampler_view_reference(&c->layers[layer].sampler_views[0], indexes);
   pipe_sampler_view_reference(&c->layers[layer].sampler_views[1], palette);
   pipe_sampler_view_reference(&c->layers[layer].sampler_views[2], NULL);
   calc_src_and_dst(&c->layers[layer], indexes->texture->width0, indexes->texture->height0,
                    src_rect ? *src_rect : default_rect(&c->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&c->layers[layer]));
}

void
vl_compositor_set_rgba_layer(struct vl_compositor *c,
                             unsigned layer,
                             struct pipe_sampler_view *rgba,
                             struct pipe_video_rect *src_rect,
                             struct pipe_video_rect *dst_rect)
{
   assert(c && rgba);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   c->used_layers |= 1 << layer;
   c->layers[layer].clearing = rgba->swizzle_a == PIPE_SWIZZLE_ONE;
   c->layers[layer].fs = c->fs_rgba;
   c->layers[layer].samplers[0] = c->sampler_linear;
   c->layers[layer].samplers[1] = NULL;
   c->layers[layer].samplers[2] = NULL;
   pipe_sampler_view_reference(&c->layers[layer].sampler_views[0], rgba);
   pipe_sampler_view_reference(&c->layers[layer].sampler_views[1], NULL);
   pipe_sampler_view_reference(&c->layers[layer].sampler_views[2], NULL);
   calc_src_and_dst(&c->layers[layer], rgba->texture->width0, rgba->texture->height0,
                    src_rect ? *src_rect : default_rect(&c->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&c->layers[layer]));
}

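/* Render all active layers to dst_surface: clear the render target if the
 * dirty area was not already covered by a clearing layer, set up the shared
 * pipe state, draw one quad per active layer and finally flush. */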
void
vl_compositor_render(struct vl_compositor *c,
                     enum pipe_mpeg12_picture_type picture_type,
                     struct pipe_surface           *dst_surface,
                     struct pipe_video_rect        *dst_area,
                     struct pipe_fence_handle      **fence)
{
   struct pipe_scissor_state scissor;

   assert(c);
   assert(dst_surface);

   c->fb_state.width = dst_surface->width;
   c->fb_state.height = dst_surface->height;
   c->fb_state.cbufs[0] = dst_surface;

   c->viewport.scale[0] = dst_surface->width;
   c->viewport.scale[1] = dst_surface->height;

   if (dst_area) {
      scissor.minx = dst_area->x;
      scissor.miny = dst_area->y;
      scissor.maxx = dst_area->x + dst_area->w;
      scissor.maxy = dst_area->y + dst_area->h;
   } else {
      scissor.minx = 0;
      scissor.miny = 0;
      scissor.maxx = dst_surface->width;
      scissor.maxy = dst_surface->height;
   }

   gen_vertex_data(c);

   if (c->dirty_tl.x < c->dirty_br.x || c->dirty_tl.y < c->dirty_br.y) {
      util_clear_render_target(c->pipe, dst_surface, c->clear_color, 0, 0, dst_surface->width, dst_surface->height);
      c->dirty_tl.x = c->dirty_tl.y = 1.0f;
      c->dirty_br.x = c->dirty_br.y = 0.0f;
   }

   c->pipe->set_scissor_state(c->pipe, &scissor);
   c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
   c->pipe->set_viewport_state(c->pipe, &c->viewport);
   c->pipe->bind_vs_state(c->pipe, c->vs);
   c->pipe->set_vertex_buffers(c->pipe, 1, &c->vertex_buf);
   c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
   c->pipe->set_constant_buffer(c->pipe, PIPE_SHADER_FRAGMENT, 0, c->csc_matrix);
   c->pipe->bind_blend_state(c->pipe, c->blend);
   c->pipe->bind_rasterizer_state(c->pipe, c->rast);

   draw_layers(c);

   c->pipe->flush(c->pipe, fence);
}

bool
vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe)
{
   csc_matrix csc_matrix;

   c->pipe = pipe;

   if (!init_pipe_state(c))
      return false;

   if (!init_shaders(c)) {
      cleanup_pipe_state(c);
      return false;
   }
   if (!init_buffers(c)) {
      cleanup_shaders(c);
      cleanup_pipe_state(c);
      return false;
   }

   vl_compositor_clear_layers(c);

   vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, csc_matrix);
   vl_compositor_set_csc_matrix(c, csc_matrix);

   c->clear_color[0] = c->clear_color[1] = 0.0f;
   c->clear_color[2] = c->clear_color[3] = 0.0f;
   vl_compositor_reset_dirty_area(c);

   return true;
}