vl_mpeg12_decoder.c revision 2e62b30826679e9d5e1a783dc19baabec4fc8dfa
1/**************************************************************************
2 *
3 * Copyright 2009 Younes Manton.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include <math.h>
29#include <assert.h>
30
31#include <util/u_memory.h>
32#include <util/u_rect.h>
33#include <util/u_video.h>
34
35#include "vl_mpeg12_decoder.h"
36#include "vl_defines.h"
37
38#define SCALE_FACTOR_SNORM (32768.0f / 256.0f)
39#define SCALE_FACTOR_SSCALED (1.0f / 256.0f)
40
41struct format_config {
42   enum pipe_format zscan_source_format;
43   enum pipe_format idct_source_format;
44   enum pipe_format mc_source_format;
45
46   float idct_scale;
47   float mc_scale;
48};
49
50static const struct format_config bitstream_format_config[] = {
51   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
52   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
53   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
54   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
55};
56
57static const unsigned num_bitstream_format_configs =
58   sizeof(bitstream_format_config) / sizeof(struct format_config);
59
60static const struct format_config idct_format_config[] = {
61   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
62   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
63   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
64   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
65};
66
67static const unsigned num_idct_format_configs =
68   sizeof(idct_format_config) / sizeof(struct format_config);
69
70static const struct format_config mc_format_config[] = {
71   //{ PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SSCALED, 0.0f, SCALE_FACTOR_SSCALED },
72   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SNORM, 0.0f, SCALE_FACTOR_SNORM }
73};
74
75static const unsigned num_mc_format_configs =
76   sizeof(mc_format_config) / sizeof(struct format_config);
77
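/* coded_block_pattern bit masks for 4:2:0 sub-sampling, indexed as
 * [component][y][x]: component 0 covers the four luma blocks, 1 is Cb, 2 is Cr */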
78static const unsigned const_empty_block_mask_420[3][2][2] = {
79   { { 0x20, 0x10 },  { 0x08, 0x04 } },
80   { { 0x02, 0x02 },  { 0x02, 0x02 } },
81   { { 0x01, 0x01 },  { 0x01, 0x01 } }
82};
83
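/* Allocate the zscan source texture for this buffer and hook up one zscan
 * buffer per plane, writing either into the IDCT source or directly into
 * the MC source depending on the entrypoint. */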
84static bool
85init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
86{
87   enum pipe_format formats[3];
88
89   struct pipe_sampler_view **source;
90   struct pipe_surface **destination;
91
92   unsigned i;
93
94   assert(dec && buffer);
95
96   formats[0] = formats[1] = formats[2] = dec->zscan_source_format;
97   buffer->zscan_source = vl_video_buffer_create_ex
98   (
99      dec->base.context,
100      dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT,
101      align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line,
102      1, PIPE_VIDEO_CHROMA_FORMAT_444, formats, PIPE_USAGE_STATIC
103   );
104
105   if (!buffer->zscan_source)
106      goto error_source;
107
108   source = buffer->zscan_source->get_sampler_view_planes(buffer->zscan_source);
109   if (!source)
110      goto error_sampler;
111
112   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
113      destination = dec->idct_source->get_surfaces(dec->idct_source);
114   else
115      destination = dec->mc_source->get_surfaces(dec->mc_source);
116
117   if (!destination)
118      goto error_surface;
119
120   for (i = 0; i < VL_MAX_PLANES; ++i)
121      if (!vl_zscan_init_buffer(i == 0 ? &dec->zscan_y : &dec->zscan_c,
122                                &buffer->zscan[i], source[i], destination[i]))
123         goto error_plane;
124
125   return true;
126
127error_plane:
128   for (; i > 0; --i)
129      vl_zscan_cleanup_buffer(&buffer->zscan[i - 1]);
130
131error_surface:
132error_sampler:
133   buffer->zscan_source->destroy(buffer->zscan_source);
134
135error_source:
136   return false;
137}
138
139static void
140cleanup_zscan_buffer(struct vl_mpeg12_buffer *buffer)
141{
142   unsigned i;
143
144   assert(buffer);
145
146   for (i = 0; i < VL_MAX_PLANES; ++i)
147      vl_zscan_cleanup_buffer(&buffer->zscan[i]);
148   buffer->zscan_source->destroy(buffer->zscan_source);
149}
150
151static bool
152init_idct_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
153{
154   struct pipe_sampler_view **idct_source_sv, **mc_source_sv;
155
156   unsigned i;
157
158   assert(dec && buffer);
159
160   idct_source_sv = dec->idct_source->get_sampler_view_planes(dec->idct_source);
161   if (!idct_source_sv)
162      goto error_source_sv;
163
164   mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
165   if (!mc_source_sv)
166      goto error_mc_source_sv;
167
168   for (i = 0; i < 3; ++i)
169      if (!vl_idct_init_buffer(i == 0 ? &dec->idct_y : &dec->idct_c,
170                               &buffer->idct[i], idct_source_sv[i],
171                               mc_source_sv[i]))
172         goto error_plane;
173
174   return true;
175
176error_plane:
177   for (; i > 0; --i)
178      vl_idct_cleanup_buffer(&buffer->idct[i - 1]);
179
180error_mc_source_sv:
181error_source_sv:
182   return false;
183}
184
185static void
186cleanup_idct_buffer(struct vl_mpeg12_buffer *buf)
187{
188   unsigned i;
189
190   assert(buf);
191
192   for (i = 0; i < 3; ++i)
193      vl_idct_cleanup_buffer(&buf->idct[i]);
194}
195
196static bool
197init_mc_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buf)
198{
199   assert(dec && buf);
200
201   if(!vl_mc_init_buffer(&dec->mc_y, &buf->mc[0]))
202      goto error_mc_y;
203
204   if(!vl_mc_init_buffer(&dec->mc_c, &buf->mc[1]))
205      goto error_mc_cb;
206
207   if(!vl_mc_init_buffer(&dec->mc_c, &buf->mc[2]))
208      goto error_mc_cr;
209
210   return true;
211
212error_mc_cr:
213   vl_mc_cleanup_buffer(&buf->mc[1]);
214
215error_mc_cb:
216   vl_mc_cleanup_buffer(&buf->mc[0]);
217
218error_mc_y:
219   return false;
220}
221
222static void
223cleanup_mc_buffer(struct vl_mpeg12_buffer *buf)
224{
225   unsigned i;
226
227   assert(buf);
228
229   for (i = 0; i < VL_MAX_PLANES; ++i)
230      vl_mc_cleanup_buffer(&buf->mc[i]);
231}
232
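/* Derive the forward/backward blend weights for motion compensation from the
 * macroblock type flags. */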
233static inline void
234MacroBlockTypeToPipeWeights(const struct pipe_mpeg12_macroblock *mb, unsigned weights[2])
235{
236   assert(mb);
237
238   switch (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
239   case PIPE_MPEG12_MB_TYPE_MOTION_FORWARD:
240      weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
241      weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
242      break;
243
244   case (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD):
245      weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
246      weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
247      break;
248
249   case PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD:
250      weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
251      weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
252      break;
253
254   default:
255      if (mb->macroblock_type & PIPE_MPEG12_MB_TYPE_PATTERN) {
256         /* pattern without a motion vector, just copy the old frame content */
257         weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
258         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
259      } else {
260         weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
261         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
262      }
263      break;
264   }
265}
266
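/* Translate one forward or backward motion vector of a macroblock into the
 * vl_motionvector layout used by the MC stage, handling frame and field
 * motion types. */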
267static inline struct vl_motionvector
268MotionVectorToPipe(const struct pipe_mpeg12_macroblock *mb, unsigned vector,
269                   unsigned field_select_mask, unsigned weight)
270{
271   struct vl_motionvector mv;
272
273   assert(mb);
274
275   if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
276      switch (mb->macroblock_modes.bits.frame_motion_type) {
277      case PIPE_MPEG12_MO_TYPE_FRAME:
278         mv.top.x = mb->PMV[0][vector][0];
279         mv.top.y = mb->PMV[0][vector][1];
280         mv.top.field_select = PIPE_VIDEO_FRAME;
281         mv.top.weight = weight;
282
283         mv.bottom.x = mb->PMV[0][vector][0];
284         mv.bottom.y = mb->PMV[0][vector][1];
285         mv.bottom.weight = weight;
286         mv.bottom.field_select = PIPE_VIDEO_FRAME;
287         break;
288
289      case PIPE_MPEG12_MO_TYPE_FIELD:
290         mv.top.x = mb->PMV[0][vector][0];
291         mv.top.y = mb->PMV[0][vector][1];
292         mv.top.field_select = (mb->motion_vertical_field_select & field_select_mask) ?
293            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
294         mv.top.weight = weight;
295
296         mv.bottom.x = mb->PMV[1][vector][0];
297         mv.bottom.y = mb->PMV[1][vector][1];
298         mv.bottom.field_select = (mb->motion_vertical_field_select & (field_select_mask << 2)) ?
299            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
300         mv.bottom.weight = weight;
301         break;
302
303      default: // TODO: Support DUALPRIME and 16x8
304         break;
305      }
306   } else {
307      mv.top.x = mv.top.y = 0;
308      mv.top.field_select = PIPE_VIDEO_FRAME;
309      mv.top.weight = weight;
310
311      mv.bottom.x = mv.bottom.y = 0;
312      mv.bottom.field_select = PIPE_VIDEO_FRAME;
313      mv.bottom.weight = weight;
314   }
315   return mv;
316}
317
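/* Append the coded blocks of a macroblock to the per-component Y/Cb/Cr vertex
 * streams and copy their DCT coefficients into the mapped source textures. */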
318static inline void
319UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec,
320                  struct vl_mpeg12_buffer *buf,
321                  const struct pipe_mpeg12_macroblock *mb)
322{
323   unsigned intra;
324   unsigned tb, x, y, luma_blocks;
325   short *blocks;
326
327   assert(dec && buf);
328   assert(mb);
329
330   if (!mb->coded_block_pattern)
331      return;
332
333   blocks = mb->blocks;
334   intra = mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA ? 1 : 0;
335
336   for (y = 0, luma_blocks = 0; y < 2; ++y) {
337      for (x = 0; x < 2; ++x) {
338         if (mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {
339
340            struct vl_ycbcr_block *stream = buf->ycbcr_stream[0];
341            stream->x = mb->x * 2 + x;
342            stream->y = mb->y * 2 + y;
343            stream->intra = intra;
344            stream->coding = mb->macroblock_modes.bits.dct_type;
345
346            buf->num_ycbcr_blocks[0]++;
347            buf->ycbcr_stream[0]++;
348
349            luma_blocks++;
350         }
351      }
352   }
353
354   if (luma_blocks > 0) {
355      memcpy(buf->texels[0], blocks, 64 * sizeof(short) * luma_blocks);
356      buf->texels[0] += 64 * luma_blocks;
357      blocks += 64 * luma_blocks;
358   }
359
360   /* TODO: Implement 422, 444 */
361   //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
362
363   for (tb = 1; tb < 3; ++tb) {
364      if (mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {
365
366         struct vl_ycbcr_block *stream = buf->ycbcr_stream[tb];
367         stream->x = mb->x;
368         stream->y = mb->y;
369         stream->intra = intra;
370         stream->coding = 0;
371
372         buf->num_ycbcr_blocks[tb]++;
373         buf->ycbcr_stream[tb]++;
374
375         memcpy(buf->texels[tb], blocks, 64 * sizeof(short));
376         buf->texels[tb] += 64;
377         blocks += 64;
378      }
379   }
380}
381
382static void
383vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
384{
385   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
386
387   assert(decoder);
388
389   /* Asserted in softpipe_delete_fs_state() for some reason */
390   dec->base.context->bind_vs_state(dec->base.context, NULL);
391   dec->base.context->bind_fs_state(dec->base.context, NULL);
392
393   dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa);
394   dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr);
395
396   vl_mc_cleanup(&dec->mc_y);
397   vl_mc_cleanup(&dec->mc_c);
398   dec->mc_source->destroy(dec->mc_source);
399
400   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
401      vl_idct_cleanup(&dec->idct_y);
402      vl_idct_cleanup(&dec->idct_c);
403      dec->idct_source->destroy(dec->idct_source);
404   }
405
406   vl_zscan_cleanup(&dec->zscan_y);
407   vl_zscan_cleanup(&dec->zscan_c);
408
409   dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
410   dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv);
411
412   pipe_resource_reference(&dec->quads.buffer, NULL);
413   pipe_resource_reference(&dec->pos.buffer, NULL);
414   pipe_resource_reference(&dec->block_num.buffer, NULL);
415
416   pipe_sampler_view_reference(&dec->zscan_linear, NULL);
417   pipe_sampler_view_reference(&dec->zscan_normal, NULL);
418   pipe_sampler_view_reference(&dec->zscan_alternate, NULL);
419
420   FREE(dec);
421}
422
423static void *
424vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
425{
426   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
427   struct vl_mpeg12_buffer *buffer;
428
429   assert(dec);
430
431   buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
432   if (buffer == NULL)
433      return NULL;
434
435   if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
436                   dec->base.width / MACROBLOCK_WIDTH,
437                   dec->base.height / MACROBLOCK_HEIGHT))
438      goto error_vertex_buffer;
439
440   if (!init_mc_buffer(dec, buffer))
441      goto error_mc;
442
443   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
444      if (!init_idct_buffer(dec, buffer))
445         goto error_idct;
446
447   if (!init_zscan_buffer(dec, buffer))
448      goto error_zscan;
449
450   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
451      vl_mpg12_bs_init(&buffer->bs,
452                       dec->base.width / MACROBLOCK_WIDTH,
453                       dec->base.height / MACROBLOCK_HEIGHT);
454
455   return buffer;
456
457error_zscan:
458   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
459      cleanup_idct_buffer(buffer);
460
461error_idct:
462   cleanup_mc_buffer(buffer);
463
464error_mc:
465   vl_vb_cleanup(&buffer->vertex_stream);
466
467error_vertex_buffer:
468   FREE(buffer);
469   return NULL;
470}
471
472static void
473vl_mpeg12_destroy_buffer(struct pipe_video_decoder *decoder, void *buffer)
474{
475   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
476   struct vl_mpeg12_buffer *buf = buffer;
477
478   assert(dec && buf);
479
480   cleanup_zscan_buffer(buf);
481
482   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
483      cleanup_idct_buffer(buf);
484
485   cleanup_mc_buffer(buf);
486
487   vl_vb_cleanup(&buf->vertex_stream);
488
489   FREE(buf);
490}
491
492static void
493vl_mpeg12_set_decode_buffer(struct pipe_video_decoder *decoder, void *buffer)
494{
495   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
496
497   assert(dec && buffer);
498
499   dec->current_buffer = buffer;
500}
501
502static void
503vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder,
504                                 struct pipe_picture_desc *picture)
505{
506   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
507   struct pipe_mpeg12_picture_desc *pic = (struct pipe_mpeg12_picture_desc *)picture;
508
509   assert(dec && pic);
510
511   dec->picture_desc = *pic;
512}
513
514static void
515vl_mpeg12_set_quant_matrix(struct pipe_video_decoder *decoder,
516                           const struct pipe_quant_matrix *matrix)
517{
518   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
519   const struct pipe_mpeg12_quant_matrix *m = (const struct pipe_mpeg12_quant_matrix *)matrix;
520
521   assert(dec);
522   assert(matrix->codec == PIPE_VIDEO_CODEC_MPEG12);
523
524   memcpy(dec->intra_matrix, m->intra_matrix, 64);
525   memcpy(dec->non_intra_matrix, m->non_intra_matrix, 64);
526}
527
528static void
529vl_mpeg12_set_decode_target(struct pipe_video_decoder *decoder,
530                            struct pipe_video_buffer *target)
531{
532   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
533   struct pipe_surface **surfaces;
534   unsigned i;
535
536   assert(dec);
537
538   surfaces = target->get_surfaces(target);
539   for (i = 0; i < VL_MAX_PLANES; ++i)
540      pipe_surface_reference(&dec->target_surfaces[i], surfaces[i]);
541}
542
543static void
544vl_mpeg12_set_reference_frames(struct pipe_video_decoder *decoder,
545                               struct pipe_video_buffer **ref_frames,
546                               unsigned num_ref_frames)
547{
548   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
549   struct pipe_sampler_view **sv;
550   unsigned i,j;
551
552   assert(dec);
553   assert(num_ref_frames <= VL_MAX_REF_FRAMES);
554
555   for (i = 0; i < num_ref_frames; ++i) {
556      sv = ref_frames[i]->get_sampler_view_planes(ref_frames[i]);
557      for (j = 0; j < VL_MAX_PLANES; ++j)
558         pipe_sampler_view_reference(&dec->ref_frames[i][j], sv[j]);
559   }
560
561   for (; i < VL_MAX_REF_FRAMES; ++i)
562      for (j = 0; j < VL_MAX_PLANES; ++j)
563         pipe_sampler_view_reference(&dec->ref_frames[i][j], NULL);
564}
565
566static void
567vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder)
568{
569   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
570
571   struct vl_mpeg12_buffer *buf;
572   struct pipe_sampler_view **sampler_views;
573   unsigned i;
574
575   assert(dec);
576
577   buf = dec->current_buffer;
578   assert(buf);
579
580   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
581      dec->intra_matrix[0] = 1 << (7 - dec->picture_desc.intra_dc_precision);
582
583   for (i = 0; i < VL_MAX_PLANES; ++i) {
584      vl_zscan_upload_quant(&buf->zscan[i], dec->intra_matrix, true);
585      vl_zscan_upload_quant(&buf->zscan[i], dec->non_intra_matrix, false);
586   }
587
588   vl_vb_map(&buf->vertex_stream, dec->base.context);
589
590   sampler_views = buf->zscan_source->get_sampler_view_planes(buf->zscan_source);
591
592   assert(sampler_views);
593
594   for (i = 0; i < VL_MAX_PLANES; ++i) {
595      struct pipe_resource *tex = sampler_views[i]->texture;
596      struct pipe_box rect =
597      {
598         0, 0, 0,
599         tex->width0,
600         tex->height0,
601         1
602      };
603
604      buf->tex_transfer[i] = dec->base.context->get_transfer
605      (
606         dec->base.context, tex,
607         0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
608         &rect
609      );
610
611      buf->texels[i] = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer[i]);
612
613      buf->num_ycbcr_blocks[i] = 0;
614   }
615
616   for (i = 0; i < VL_MAX_PLANES; ++i)
617      buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
618
619   for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
620      buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);
621
622   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
623      vl_mpg12_bs_set_buffers(&buf->bs, buf->ycbcr_stream, buf->texels, buf->mv_stream);
624
625   } else {
626
627      for (i = 0; i < VL_MAX_PLANES; ++i)
628         vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
629   }
630}
631
632static void
633vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
634                            const struct pipe_macroblock *macroblocks,
635                            unsigned num_macroblocks)
636{
637   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
638   const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
639   struct vl_mpeg12_buffer *buf;
640
641   unsigned i, j, mv_weights[2];
642
643   assert(dec && dec->current_buffer);
644   assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
645
646   buf = dec->current_buffer;
647   assert(buf);
648
649   for (; num_macroblocks > 0; --num_macroblocks) {
650      unsigned mb_addr = mb->y * dec->width_in_macroblocks + mb->x;
651
652      if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_PATTERN | PIPE_MPEG12_MB_TYPE_INTRA))
653         UploadYcbcrBlocks(dec, buf, mb);
654
655      MacroBlockTypeToPipeWeights(mb, mv_weights);
656
657      for (i = 0; i < 2; ++i) {
658         if (!dec->ref_frames[i][0]) continue;
659
660         buf->mv_stream[i][mb_addr] = MotionVectorToPipe
661         (
662            mb, i,
663            i ? PIPE_MPEG12_FS_FIRST_BACKWARD : PIPE_MPEG12_FS_FIRST_FORWARD,
664            mv_weights[i]
665         );
666      }
667
668      /* see section 7.6.6 of the spec */
669      if (mb->num_skipped_macroblocks > 0) {
670         struct vl_motionvector skipped_mv[2];
671
672         if (dec->ref_frames[0][0] && !dec->ref_frames[1][0]) {
673            skipped_mv[0].top.x = skipped_mv[0].top.y = 0;
674            skipped_mv[0].top.weight = PIPE_VIDEO_MV_WEIGHT_MAX;
675         } else {
676            skipped_mv[0] = buf->mv_stream[0][mb_addr];
677            skipped_mv[1] = buf->mv_stream[1][mb_addr];
678         }
679         skipped_mv[0].top.field_select = PIPE_VIDEO_FRAME;
680         skipped_mv[1].top.field_select = PIPE_VIDEO_FRAME;
681
682         skipped_mv[0].bottom = skipped_mv[0].top;
683         skipped_mv[1].bottom = skipped_mv[1].top;
684
685         ++mb_addr;
686         for (i = 0; i < mb->num_skipped_macroblocks; ++i, ++mb_addr) {
687            for (j = 0; j < 2; ++j) {
688               if (!dec->ref_frames[j][0]) continue;
689               buf->mv_stream[j][mb_addr] = skipped_mv[j];
690
691            }
692         }
693      }
694
695      ++mb;
696   }
697}
698
699static void
700vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
701                           unsigned num_bytes, const void *data)
702{
703   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
704   struct vl_mpeg12_buffer *buf;
705
706   unsigned i;
707
708   assert(dec && dec->current_buffer);
709
710   buf = dec->current_buffer;
711   assert(buf);
712
713   for (i = 0; i < VL_MAX_PLANES; ++i)
714      vl_zscan_set_layout(&buf->zscan[i], dec->picture_desc.alternate_scan ?
715                          dec->zscan_alternate : dec->zscan_normal);
716
717   vl_mpg12_bs_decode(&buf->bs, num_bytes, data, &dec->picture_desc, buf->num_ycbcr_blocks);
718}
719
720static void
721vl_mpeg12_end_frame(struct pipe_video_decoder *decoder)
722{
723   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
724   struct pipe_sampler_view **mc_source_sv;
725   struct pipe_vertex_buffer vb[3];
726   struct vl_mpeg12_buffer *buf;
727
728   unsigned i, j, component;
729   unsigned nr_components;
730
731   assert(dec && dec->current_buffer);
732
733   buf = dec->current_buffer;
734
735   vl_vb_unmap(&buf->vertex_stream, dec->base.context);
736
737   for (i = 0; i < VL_MAX_PLANES; ++i) {
738      dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer[i]);
739      dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer[i]);
740   }
741
742   vb[0] = dec->quads;
743   vb[1] = dec->pos;
744
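   /* First pass: motion compensation from the reference frames into each
    * target surface plane. */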
745   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
746   for (i = 0; i < VL_MAX_PLANES; ++i) {
747      if (!dec->target_surfaces[i]) continue;
748
749      vl_mc_set_surface(&buf->mc[i], dec->target_surfaces[i]);
750
751      for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
752         if (!dec->ref_frames[j][i]) continue;
753
754         vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
755         dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
756
757         vl_mc_render_ref(&buf->mc[i], dec->ref_frames[j][i]);
758      }
759   }
760
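   /* Second pass: inverse zscan of the uploaded coefficient blocks and, when a
    * shader IDCT is used, the first IDCT stage. */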
761   vb[2] = dec->block_num;
762
763   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
764   for (i = 0; i < VL_MAX_PLANES; ++i) {
765      if (!buf->num_ycbcr_blocks[i]) continue;
766
767      vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
768      dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
769
770      vl_zscan_render(&buf->zscan[i] , buf->num_ycbcr_blocks[i]);
771
772      if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
773         vl_idct_flush(&buf->idct[i], buf->num_ycbcr_blocks[i]);
774   }
775
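   /* Final pass: run the second IDCT stage (or sample the MC source directly)
    * and blend the block data into the target surfaces. */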
776   mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
777   for (i = 0, component = 0; i < VL_MAX_PLANES; ++i) {
778      if (!dec->target_surfaces[i]) continue;
779
780      nr_components = util_format_get_nr_components(dec->target_surfaces[i]->texture->format);
781      for (j = 0; j < nr_components; ++j, ++component) {
782         if (!buf->num_ycbcr_blocks[i]) continue;
783
784         vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component);
785         dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
786
787         if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
788            vl_idct_prepare_stage2(&buf->idct[component]);
789         else {
790            dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]);
791            dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
792         }
793         vl_mc_render_ycbcr(&buf->mc[i], j, buf->num_ycbcr_blocks[component]);
794      }
795   }
796}
797
798static void
799vl_mpeg12_flush(struct pipe_video_decoder *decoder)
800{
801   assert(decoder);
802
803   // No-op; for shader-based decoding it is much faster to flush everything in end_frame
804}
805
806static bool
807init_pipe_state(struct vl_mpeg12_decoder *dec)
808{
809   struct pipe_depth_stencil_alpha_state dsa;
810   struct pipe_sampler_state sampler;
811   unsigned i;
812
813   assert(dec);
814
815   memset(&dsa, 0, sizeof dsa);
816   dsa.depth.enabled = 0;
817   dsa.depth.writemask = 0;
818   dsa.depth.func = PIPE_FUNC_ALWAYS;
819   for (i = 0; i < 2; ++i) {
820      dsa.stencil[i].enabled = 0;
821      dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
822      dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
823      dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
824      dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
825      dsa.stencil[i].valuemask = 0;
826      dsa.stencil[i].writemask = 0;
827   }
828   dsa.alpha.enabled = 0;
829   dsa.alpha.func = PIPE_FUNC_ALWAYS;
830   dsa.alpha.ref_value = 0;
831   dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa);
832   dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa);
833
834   memset(&sampler, 0, sizeof(sampler));
835   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
836   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
837   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
838   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
839   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
840   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
841   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
842   sampler.compare_func = PIPE_FUNC_ALWAYS;
843   sampler.normalized_coords = 1;
844   dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler);
845   if (!dec->sampler_ycbcr)
846      return false;
847
848   return true;
849}
850
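/* Pick the first format combination that the screen supports for the zscan,
 * IDCT and MC source buffers. */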
851static const struct format_config*
852find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config configs[], unsigned num_configs)
853{
854   struct pipe_screen *screen;
855   unsigned i;
856
857   assert(dec);
858
859   screen = dec->base.context->screen;
860
861   for (i = 0; i < num_configs; ++i) {
862      if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
863                                       1, PIPE_BIND_SAMPLER_VIEW))
864         continue;
865
866      if (configs[i].idct_source_format != PIPE_FORMAT_NONE) {
867         if (!screen->is_format_supported(screen, configs[i].idct_source_format, PIPE_TEXTURE_2D,
868                                          1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
869            continue;
870
871         if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_3D,
872                                          1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
873            continue;
874      } else {
875         if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_2D,
876                                          1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
877            continue;
878      }
879      return &configs[i];
880   }
881
882   return NULL;
883}
884
885static bool
886init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
887{
888   unsigned num_channels;
889
890   assert(dec);
891
892   dec->zscan_source_format = format_config->zscan_source_format;
893   dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, dec->blocks_per_line);
894   dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line);
895   dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line);
896
897   num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;
898
899   if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height,
900                      dec->blocks_per_line, dec->num_blocks, num_channels))
901      return false;
902
903   if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height,
904                      dec->blocks_per_line, dec->num_blocks, num_channels))
905      return false;
906
907   return true;
908}
909
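/* Create the intermediate IDCT and MC source buffers and set up the shader
 * based IDCT for the luma and chroma planes. */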
910static bool
911init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
912{
913   unsigned nr_of_idct_render_targets, max_inst;
914   enum pipe_format formats[3];
915
916   struct pipe_sampler_view *matrix = NULL;
917
918   nr_of_idct_render_targets = dec->base.context->screen->get_param
919   (
920      dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS
921   );
922
923   max_inst = dec->base.context->screen->get_shader_param
924   (
925      dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
926   );
927
928   // Just assume we need 32 instructions per render target; not 100% accurate, but should work in most cases
929   if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
930      // using more than 4 render targets usually doesn't make any sense
931      nr_of_idct_render_targets = 4;
932   else
933      nr_of_idct_render_targets = 1;
934
935   formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
936   dec->idct_source = vl_video_buffer_create_ex
937   (
938      dec->base.context, dec->base.width / 4, dec->base.height, 1,
939      dec->base.chroma_format, formats, PIPE_USAGE_STATIC
940   );
941
942   if (!dec->idct_source)
943      goto error_idct_source;
944
945   formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
946   dec->mc_source = vl_video_buffer_create_ex
947   (
948      dec->base.context, dec->base.width / nr_of_idct_render_targets,
949      dec->base.height / 4, nr_of_idct_render_targets,
950      dec->base.chroma_format, formats, PIPE_USAGE_STATIC
951   );
952
953   if (!dec->mc_source)
954      goto error_mc_source;
955
956   if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale)))
957      goto error_matrix;
958
959   if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height,
960                     nr_of_idct_render_targets, matrix, matrix))
961      goto error_y;
962
963   if(!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height,
964                    nr_of_idct_render_targets, matrix, matrix))
965      goto error_c;
966
967   pipe_sampler_view_reference(&matrix, NULL);
968
969   return true;
970
971error_c:
972   vl_idct_cleanup(&dec->idct_y);
973
974error_y:
975   pipe_sampler_view_reference(&matrix, NULL);
976
977error_matrix:
978   dec->mc_source->destroy(dec->mc_source);
979
980error_mc_source:
981   dec->idct_source->destroy(dec->idct_source);
982
983error_idct_source:
984   return false;
985}
986
987static bool
988init_mc_source_without_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
989{
990   enum pipe_format formats[3];
991
992   formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
993   dec->mc_source = vl_video_buffer_create_ex
994   (
995      dec->base.context, dec->base.width, dec->base.height, 1,
996      dec->base.chroma_format, formats, PIPE_USAGE_STATIC
997   );
998
999   return dec->mc_source != NULL;
1000}
1001
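/* Shader callbacks for the MC stage: either chain in the second IDCT stage or
 * just pass through / sample the Y/Cb/Cr texels when no shader IDCT is used. */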
1002static void
1003mc_vert_shader_callback(void *priv, struct vl_mc *mc,
1004                        struct ureg_program *shader,
1005                        unsigned first_output,
1006                        struct ureg_dst tex)
1007{
1008   struct vl_mpeg12_decoder *dec = priv;
1009   struct ureg_dst o_vtex;
1010
1011   assert(priv && mc);
1012   assert(shader);
1013
1014   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1015      struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
1016      vl_idct_stage2_vert_shader(idct, shader, first_output, tex);
1017   } else {
1018      o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output);
1019      ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_XY), ureg_src(tex));
1020   }
1021}
1022
1023static void
1024mc_frag_shader_callback(void *priv, struct vl_mc *mc,
1025                        struct ureg_program *shader,
1026                        unsigned first_input,
1027                        struct ureg_dst dst)
1028{
1029   struct vl_mpeg12_decoder *dec = priv;
1030   struct ureg_src src, sampler;
1031
1032   assert(priv && mc);
1033   assert(shader);
1034
1035   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1036      struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
1037      vl_idct_stage2_frag_shader(idct, shader, first_input, dst);
1038   } else {
1039      src = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input, TGSI_INTERPOLATE_LINEAR);
1040      sampler = ureg_DECL_sampler(shader, 0);
1041      ureg_TEX(shader, dst, TGSI_TEXTURE_2D, src, sampler);
1042   }
1043}
1044
1045struct pipe_video_decoder *
1046vl_create_mpeg12_decoder(struct pipe_context *context,
1047                         enum pipe_video_profile profile,
1048                         enum pipe_video_entrypoint entrypoint,
1049                         enum pipe_video_chroma_format chroma_format,
1050                         unsigned width, unsigned height)
1051{
1052   const unsigned block_size_pixels = BLOCK_WIDTH * BLOCK_HEIGHT;
1053   const struct format_config *format_config;
1054   struct vl_mpeg12_decoder *dec;
1055
1056   assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);
1057
1058   dec = CALLOC_STRUCT(vl_mpeg12_decoder);
1059
1060   if (!dec)
1061      return NULL;
1062
1063   dec->base.context = context;
1064   dec->base.profile = profile;
1065   dec->base.entrypoint = entrypoint;
1066   dec->base.chroma_format = chroma_format;
1067   dec->base.width = width;
1068   dec->base.height = height;
1069
1070   dec->base.destroy = vl_mpeg12_destroy;
1071   dec->base.create_buffer = vl_mpeg12_create_buffer;
1072   dec->base.destroy_buffer = vl_mpeg12_destroy_buffer;
1073   dec->base.set_decode_buffer = vl_mpeg12_set_decode_buffer;
1074   dec->base.set_picture_parameters = vl_mpeg12_set_picture_parameters;
1075   dec->base.set_quant_matrix = vl_mpeg12_set_quant_matrix;
1076   dec->base.set_decode_target = vl_mpeg12_set_decode_target;
1077   dec->base.set_reference_frames = vl_mpeg12_set_reference_frames;
1078   dec->base.begin_frame = vl_mpeg12_begin_frame;
1079   dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
1080   dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
1081   dec->base.end_frame = vl_mpeg12_end_frame;
1082   dec->base.flush = vl_mpeg12_flush;
1083
1084   dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
1085   dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
1086   dec->width_in_macroblocks = align(dec->base.width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
1087
1088   dec->quads = vl_vb_upload_quads(dec->base.context);
1089   dec->pos = vl_vb_upload_pos(
1090      dec->base.context,
1091      dec->base.width / MACROBLOCK_WIDTH,
1092      dec->base.height / MACROBLOCK_HEIGHT
1093   );
1094   dec->block_num = vl_vb_upload_block_num(dec->base.context, dec->num_blocks);
1095
1096   dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
1097   dec->ves_mv = vl_vb_get_ves_mv(dec->base.context);
1098
1099   /* TODO: Implement 422, 444 */
1100   assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
1101
1102   if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
1103      dec->chroma_width = dec->base.width / 2;
1104      dec->chroma_height = dec->base.height / 2;
1105   } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
1106      dec->chroma_width = dec->base.width;
1107      dec->chroma_height = dec->base.height / 2;
1108   } else {
1109      dec->chroma_width = dec->base.width;
1110      dec->chroma_height = dec->base.height;
1111   }
1112
1113   switch (entrypoint) {
1114   case PIPE_VIDEO_ENTRYPOINT_BITSTREAM:
1115      format_config = find_format_config(dec, bitstream_format_config, num_bitstream_format_configs);
1116      break;
1117
1118   case PIPE_VIDEO_ENTRYPOINT_IDCT:
1119      format_config = find_format_config(dec, idct_format_config, num_idct_format_configs);
1120      break;
1121
1122   case PIPE_VIDEO_ENTRYPOINT_MC:
1123      format_config = find_format_config(dec, mc_format_config, num_mc_format_configs);
1124      break;
1125
1126   default:
1127      assert(0);
1128      return NULL;
1129   }
1130
1131   if (!format_config)
1132      return NULL;
1133
1134   if (!init_zscan(dec, format_config))
1135      goto error_zscan;
1136
1137   if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1138      if (!init_idct(dec, format_config))
1139         goto error_sources;
1140   } else {
1141      if (!init_mc_source_without_idct(dec, format_config))
1142         goto error_sources;
1143   }
1144
1145   if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
1146                   MACROBLOCK_HEIGHT, format_config->mc_scale,
1147                   mc_vert_shader_callback, mc_frag_shader_callback, dec))
1148      goto error_mc_y;
1149
1150   // TODO
1151   if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
1152                   BLOCK_HEIGHT, format_config->mc_scale,
1153                   mc_vert_shader_callback, mc_frag_shader_callback, dec))
1154      goto error_mc_c;
1155
1156   if (!init_pipe_state(dec))
1157      goto error_pipe_state;
1158
1159   memset(dec->intra_matrix, 0x10, 64);
1160   memset(dec->non_intra_matrix, 0x10, 64);
1161
1162   return &dec->base;
1163
1164error_pipe_state:
1165   vl_mc_cleanup(&dec->mc_c);
1166
1167error_mc_c:
1168   vl_mc_cleanup(&dec->mc_y);
1169
1170error_mc_y:
1171   if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
1172      vl_idct_cleanup(&dec->idct_y);
1173      vl_idct_cleanup(&dec->idct_c);
1174      dec->idct_source->destroy(dec->idct_source);
1175   }
1176   dec->mc_source->destroy(dec->mc_source);
1177
1178error_sources:
1179   vl_zscan_cleanup(&dec->zscan_y);
1180   vl_zscan_cleanup(&dec->zscan_c);
1181
1182error_zscan:
1183   FREE(dec);
1184   return NULL;
1185}
1186