vl_mpeg12_decoder.c revision bce506ffc09c44552c3d1053c6a0450b8f010292
/**************************************************************************
 *
 * Copyright 2009 Younes Manton.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <math.h>
#include <assert.h>

#include "util/u_memory.h"
#include "util/u_rect.h"
#include "util/u_sampler.h"
#include "util/u_video.h"

#include "vl_mpeg12_decoder.h"
#include "vl_defines.h"

#define SCALE_FACTOR_SNORM (32768.0f / 256.0f)
#define SCALE_FACTOR_SSCALED (1.0f / 256.0f)

struct format_config {
   enum pipe_format zscan_source_format;
   enum pipe_format idct_source_format;
   enum pipe_format mc_source_format;

   float idct_scale;
   float mc_scale;
};

static const struct format_config bitstream_format_config[] = {
//   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
//   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
};

static const unsigned num_bitstream_format_configs =
   sizeof(bitstream_format_config) / sizeof(struct format_config);

static const struct format_config idct_format_config[] = {
//   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
//   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
};

static const unsigned num_idct_format_configs =
   sizeof(idct_format_config) / sizeof(struct format_config);

static const struct format_config mc_format_config[] = {
   //{ PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SSCALED, 0.0f, SCALE_FACTOR_SSCALED },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SNORM, 0.0f, SCALE_FACTOR_SNORM }
};

static const unsigned num_mc_format_configs =
   sizeof(mc_format_config) / sizeof(struct format_config);

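/*
 * coded_block_pattern masks for a 4:2:0 macroblock: [0][y][x] selects the
 * four luma blocks, [1] and [2] the single Cb and Cr blocks. Used by
 * UploadYcbcrBlocks() to check which blocks actually carry coefficients.
 */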
static const unsigned const_empty_block_mask_420[3][2][2] = {
   { { 0x20, 0x10 },  { 0x08, 0x04 } },
   { { 0x02, 0x02 },  { 0x02, 0x02 } },
   { { 0x01, 0x01 },  { 0x01, 0x01 } }
};

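/*
 * Allocate the zscan source texture for a decode buffer and create the
 * per-plane zscan buffers that write into either the IDCT source or the
 * MC source, depending on the decoder entrypoint.
 */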
static bool
init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
{
   struct pipe_resource *res, res_tmpl;
   struct pipe_sampler_view sv_tmpl;
   struct pipe_surface **destination;

   unsigned i;

   assert(dec && buffer);

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_2D;
   res_tmpl.format = dec->zscan_source_format;
   res_tmpl.width0 = dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT;
   res_tmpl.height0 = align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line;
   res_tmpl.depth0 = 1;
   res_tmpl.array_size = 1;
   res_tmpl.usage = PIPE_USAGE_STREAM;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;

   res = dec->base.context->screen->resource_create(dec->base.context->screen, &res_tmpl);
   if (!res)
      goto error_source;

   memset(&sv_tmpl, 0, sizeof(sv_tmpl));
   u_sampler_view_default_template(&sv_tmpl, res, res->format);
   sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = PIPE_SWIZZLE_RED;
   buffer->zscan_source = dec->base.context->create_sampler_view(dec->base.context, res, &sv_tmpl);
   pipe_resource_reference(&res, NULL);
   if (!buffer->zscan_source)
      goto error_sampler;

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
      destination = dec->idct_source->get_surfaces(dec->idct_source);
   else
      destination = dec->mc_source->get_surfaces(dec->mc_source);

   if (!destination)
      goto error_surface;

   for (i = 0; i < VL_MAX_PLANES; ++i)
      if (!vl_zscan_init_buffer(i == 0 ? &dec->zscan_y : &dec->zscan_c,
                                &buffer->zscan[i], buffer->zscan_source, destination[i]))
         goto error_plane;

   return true;

error_plane:
   for (; i > 0; --i)
      vl_zscan_cleanup_buffer(&buffer->zscan[i - 1]);

error_surface:
error_sampler:
   pipe_sampler_view_reference(&buffer->zscan_source, NULL);

error_source:
   return false;
}

static void
cleanup_zscan_buffer(struct vl_mpeg12_buffer *buffer)
{
   unsigned i;

   assert(buffer);

   for (i = 0; i < VL_MAX_PLANES; ++i)
      vl_zscan_cleanup_buffer(&buffer->zscan[i]);

   pipe_sampler_view_reference(&buffer->zscan_source, NULL);
}

static bool
init_idct_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
{
   struct pipe_sampler_view **idct_source_sv, **mc_source_sv;

   unsigned i;

   assert(dec && buffer);

   idct_source_sv = dec->idct_source->get_sampler_view_planes(dec->idct_source);
   if (!idct_source_sv)
      goto error_source_sv;

   mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
   if (!mc_source_sv)
      goto error_mc_source_sv;

   for (i = 0; i < 3; ++i)
      if (!vl_idct_init_buffer(i == 0 ? &dec->idct_y : &dec->idct_c,
                               &buffer->idct[i], idct_source_sv[i],
                               mc_source_sv[i]))
         goto error_plane;

   return true;

error_plane:
   for (; i > 0; --i)
      vl_idct_cleanup_buffer(&buffer->idct[i - 1]);

error_mc_source_sv:
error_source_sv:
   return false;
}

static void
cleanup_idct_buffer(struct vl_mpeg12_buffer *buf)
{
   unsigned i;

   assert(buf);

   for (i = 0; i < 3; ++i)
      vl_idct_cleanup_buffer(&buf->idct[i]);
}

static bool
init_mc_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buf)
{
   assert(dec && buf);

   if (!vl_mc_init_buffer(&dec->mc_y, &buf->mc[0]))
      goto error_mc_y;

   if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[1]))
      goto error_mc_cb;

   if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[2]))
      goto error_mc_cr;

   return true;

error_mc_cr:
   vl_mc_cleanup_buffer(&buf->mc[1]);

error_mc_cb:
   vl_mc_cleanup_buffer(&buf->mc[0]);

error_mc_y:
   return false;
}

static void
cleanup_mc_buffer(struct vl_mpeg12_buffer *buf)
{
   unsigned i;

   assert(buf);

   for (i = 0; i < VL_MAX_PLANES; ++i)
      vl_mc_cleanup_buffer(&buf->mc[i]);
}

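/*
 * Derive the forward/backward motion vector weights from the macroblock type:
 * bidirectional prediction gets half weight on both vectors, while a
 * pattern-only macroblock simply copies the old frame content.
 */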
static INLINE void
MacroBlockTypeToPipeWeights(const struct pipe_mpeg12_macroblock *mb, unsigned weights[2])
{
   assert(mb);

   switch (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
   case PIPE_MPEG12_MB_TYPE_MOTION_FORWARD:
      weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
      weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
      break;

   case (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD):
      weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
      weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
      break;

   case PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD:
      weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
      weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
      break;

   default:
      if (mb->macroblock_type & PIPE_MPEG12_MB_TYPE_PATTERN) {
         /* pattern without a motion vector, just copy the old frame content */
         weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
      } else {
         weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
      }
      break;
   }
}

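/*
 * Translate one MPEG-2 motion vector (PMV plus field select bits) into the
 * vl_motionvector layout consumed by the motion compensation stage; only
 * frame and field motion types are handled, dual prime and 16x8 are TODO.
 */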
static INLINE struct vl_motionvector
MotionVectorToPipe(const struct pipe_mpeg12_macroblock *mb, unsigned vector,
                   unsigned field_select_mask, unsigned weight)
{
   struct vl_motionvector mv;

   assert(mb);

   if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
      switch (mb->macroblock_modes.bits.frame_motion_type) {
      case PIPE_MPEG12_MO_TYPE_FRAME:
         mv.top.x = mb->PMV[0][vector][0];
         mv.top.y = mb->PMV[0][vector][1];
         mv.top.field_select = PIPE_VIDEO_FRAME;
         mv.top.weight = weight;

         mv.bottom.x = mb->PMV[0][vector][0];
         mv.bottom.y = mb->PMV[0][vector][1];
         mv.bottom.weight = weight;
         mv.bottom.field_select = PIPE_VIDEO_FRAME;
         break;

      case PIPE_MPEG12_MO_TYPE_FIELD:
         mv.top.x = mb->PMV[0][vector][0];
         mv.top.y = mb->PMV[0][vector][1];
         mv.top.field_select = (mb->motion_vertical_field_select & field_select_mask) ?
            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
         mv.top.weight = weight;

         mv.bottom.x = mb->PMV[1][vector][0];
         mv.bottom.y = mb->PMV[1][vector][1];
         mv.bottom.field_select = (mb->motion_vertical_field_select & (field_select_mask << 2)) ?
            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
         mv.bottom.weight = weight;
         break;

      default: // TODO: Support DUALPRIME and 16x8
         break;
      }
   } else {
      mv.top.x = mv.top.y = 0;
      mv.top.field_select = PIPE_VIDEO_FRAME;
      mv.top.weight = weight;

      mv.bottom.x = mv.bottom.y = 0;
      mv.bottom.field_select = PIPE_VIDEO_FRAME;
      mv.bottom.weight = weight;
   }
   return mv;
}

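/*
 * Append the coded blocks of a macroblock to the per-plane ycbcr streams
 * and copy their coefficients into the mapped zscan source texture.
 */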
static INLINE void
UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec,
                  struct vl_mpeg12_buffer *buf,
                  const struct pipe_mpeg12_macroblock *mb)
{
   unsigned intra;
   unsigned tb, x, y, num_blocks = 0;

   assert(dec && buf);
   assert(mb);

   if (!mb->coded_block_pattern)
      return;

   intra = mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA ? 1 : 0;

   for (y = 0; y < 2; ++y) {
      for (x = 0; x < 2; ++x) {
         if (mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {

            struct vl_ycbcr_block *stream = buf->ycbcr_stream[0];
            stream->x = mb->x * 2 + x;
            stream->y = mb->y * 2 + y;
            stream->intra = intra;
            stream->coding = mb->macroblock_modes.bits.dct_type;
            stream->block_num = buf->block_num++;

            buf->num_ycbcr_blocks[0]++;
            buf->ycbcr_stream[0]++;

            num_blocks++;
         }
      }
   }

   /* TODO: Implement 422, 444 */
   //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);

   for (tb = 1; tb < 3; ++tb) {
      if (mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {

         struct vl_ycbcr_block *stream = buf->ycbcr_stream[tb];
         stream->x = mb->x;
         stream->y = mb->y;
         stream->intra = intra;
         stream->coding = 0;
         stream->block_num = buf->block_num++;

         buf->num_ycbcr_blocks[tb]++;
         buf->ycbcr_stream[tb]++;

         num_blocks++;
      }
   }

   memcpy(buf->texels, mb->blocks, 64 * sizeof(short) * num_blocks);
   buf->texels += 64 * num_blocks;
}

static void
vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;

   assert(decoder);

   /* Asserted in softpipe_delete_fs_state() for some reason */
   dec->base.context->bind_vs_state(dec->base.context, NULL);
   dec->base.context->bind_fs_state(dec->base.context, NULL);

   dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa);
   dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr);

   vl_mc_cleanup(&dec->mc_y);
   vl_mc_cleanup(&dec->mc_c);
   dec->mc_source->destroy(dec->mc_source);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      vl_idct_cleanup(&dec->idct_y);
      vl_idct_cleanup(&dec->idct_c);
      dec->idct_source->destroy(dec->idct_source);
   }

   vl_zscan_cleanup(&dec->zscan_y);
   vl_zscan_cleanup(&dec->zscan_c);

   dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
   dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv);

   pipe_resource_reference(&dec->quads.buffer, NULL);
   pipe_resource_reference(&dec->pos.buffer, NULL);

   pipe_sampler_view_reference(&dec->zscan_linear, NULL);
   pipe_sampler_view_reference(&dec->zscan_normal, NULL);
   pipe_sampler_view_reference(&dec->zscan_alternate, NULL);

   FREE(dec);
}

static void *
vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
   struct vl_mpeg12_buffer *buffer;

   assert(dec);

   buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
   if (buffer == NULL)
      return NULL;

   if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
                   dec->base.width / MACROBLOCK_WIDTH,
                   dec->base.height / MACROBLOCK_HEIGHT))
      goto error_vertex_buffer;

   if (!init_mc_buffer(dec, buffer))
      goto error_mc;

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
      if (!init_idct_buffer(dec, buffer))
         goto error_idct;

   if (!init_zscan_buffer(dec, buffer))
      goto error_zscan;

   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
      vl_mpg12_bs_init(&buffer->bs, decoder);

   return buffer;

error_zscan:
   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
      cleanup_idct_buffer(buffer);

error_idct:
   cleanup_mc_buffer(buffer);

error_mc:
   vl_vb_cleanup(&buffer->vertex_stream);

error_vertex_buffer:
   FREE(buffer);
   return NULL;
}

static void
vl_mpeg12_destroy_buffer(struct pipe_video_decoder *decoder, void *buffer)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
   struct vl_mpeg12_buffer *buf = buffer;

   assert(dec && buf);

   cleanup_zscan_buffer(buf);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
      cleanup_idct_buffer(buf);

   cleanup_mc_buffer(buf);

   vl_vb_cleanup(&buf->vertex_stream);

   FREE(buf);
}

static void
vl_mpeg12_set_decode_buffer(struct pipe_video_decoder *decoder, void *buffer)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;

   assert(dec && buffer);

   dec->current_buffer = buffer;
}

static void
vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder,
                                 struct pipe_picture_desc *picture)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_mpeg12_picture_desc *pic = (struct pipe_mpeg12_picture_desc *)picture;

   assert(dec && pic);

   dec->picture_desc = *pic;
}

static void
vl_mpeg12_set_quant_matrix(struct pipe_video_decoder *decoder,
                           const struct pipe_quant_matrix *matrix)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   const struct pipe_mpeg12_quant_matrix *m = (const struct pipe_mpeg12_quant_matrix *)matrix;

   assert(dec);
   assert(matrix->codec == PIPE_VIDEO_CODEC_MPEG12);

   memcpy(dec->intra_matrix, m->intra_matrix, 64);
   memcpy(dec->non_intra_matrix, m->non_intra_matrix, 64);
}

static void
vl_mpeg12_set_decode_target(struct pipe_video_decoder *decoder,
                            struct pipe_video_buffer *target)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_surface **surfaces;
   unsigned i;

   assert(dec);

   surfaces = target->get_surfaces(target);
   for (i = 0; i < VL_MAX_PLANES; ++i)
      pipe_surface_reference(&dec->target_surfaces[i], surfaces[i]);
}

static void
vl_mpeg12_set_reference_frames(struct pipe_video_decoder *decoder,
                               struct pipe_video_buffer **ref_frames,
                               unsigned num_ref_frames)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_sampler_view **sv;
   unsigned i,j;

   assert(dec);
   assert(num_ref_frames <= VL_MAX_REF_FRAMES);

   for (i = 0; i < num_ref_frames; ++i) {
      sv = ref_frames[i]->get_sampler_view_planes(ref_frames[i]);
      for (j = 0; j < VL_MAX_PLANES; ++j)
         pipe_sampler_view_reference(&dec->ref_frames[i][j], sv[j]);
   }

   for (; i < VL_MAX_REF_FRAMES; ++i)
      for (j = 0; j < VL_MAX_PLANES; ++j)
         pipe_sampler_view_reference(&dec->ref_frames[i][j], NULL);
}

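/*
 * Per-frame setup: upload the quant matrices, map the vertex stream and the
 * zscan source texture and reset the per-plane block counters.
 */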
static void
vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct vl_mpeg12_buffer *buf;

   struct pipe_resource *tex;
   struct pipe_box rect = { 0, 0, 0, 1, 1, 1 };

   unsigned i;

   assert(dec);

   buf = dec->current_buffer;
   assert(buf);

   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
      dec->intra_matrix[0] = 1 << (7 - dec->picture_desc.intra_dc_precision);

   for (i = 0; i < VL_MAX_PLANES; ++i) {
      struct vl_zscan *zscan = i == 0 ? &dec->zscan_y : &dec->zscan_c;
      vl_zscan_upload_quant(zscan, &buf->zscan[i], dec->intra_matrix, true);
      vl_zscan_upload_quant(zscan, &buf->zscan[i], dec->non_intra_matrix, false);
   }

   vl_vb_map(&buf->vertex_stream, dec->base.context);

   tex = buf->zscan_source->texture;
   rect.width = tex->width0;
   rect.height = tex->height0;

   buf->tex_transfer = dec->base.context->get_transfer
   (
      dec->base.context, tex,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
      &rect
   );

   buf->block_num = 0;
   buf->texels = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer);

   for (i = 0; i < VL_MAX_PLANES; ++i) {
      buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
      buf->num_ycbcr_blocks[i] = 0;
   }

   for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
      buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);

   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
      vl_mpg12_bs_set_picture_desc(&buf->bs, &dec->picture_desc);
   } else {
      for (i = 0; i < VL_MAX_PLANES; ++i)
         vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
   }
}

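/*
 * MC/IDCT level entrypoint: upload the coded blocks, convert each
 * macroblock's motion vectors and replicate them for skipped macroblocks
 * as described in section 7.6.6 of the spec.
 */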
static void
vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
                            const struct pipe_macroblock *macroblocks,
                            unsigned num_macroblocks)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
   struct vl_mpeg12_buffer *buf;

   unsigned i, j, mv_weights[2];

   assert(dec && dec->current_buffer);
   assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);

   buf = dec->current_buffer;
   assert(buf);

   for (; num_macroblocks > 0; --num_macroblocks) {
      unsigned mb_addr = mb->y * dec->width_in_macroblocks + mb->x;

      if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_PATTERN | PIPE_MPEG12_MB_TYPE_INTRA))
         UploadYcbcrBlocks(dec, buf, mb);

      MacroBlockTypeToPipeWeights(mb, mv_weights);

      for (i = 0; i < 2; ++i) {
         if (!dec->ref_frames[i][0]) continue;

         buf->mv_stream[i][mb_addr] = MotionVectorToPipe
         (
            mb, i,
            i ? PIPE_MPEG12_FS_FIRST_BACKWARD : PIPE_MPEG12_FS_FIRST_FORWARD,
            mv_weights[i]
         );
      }

      /* see section 7.6.6 of the spec */
      if (mb->num_skipped_macroblocks > 0) {
         struct vl_motionvector skipped_mv[2];

         if (dec->ref_frames[0][0] && !dec->ref_frames[1][0]) {
            skipped_mv[0].top.x = skipped_mv[0].top.y = 0;
            skipped_mv[0].top.weight = PIPE_VIDEO_MV_WEIGHT_MAX;
         } else {
            skipped_mv[0] = buf->mv_stream[0][mb_addr];
            skipped_mv[1] = buf->mv_stream[1][mb_addr];
         }
         skipped_mv[0].top.field_select = PIPE_VIDEO_FRAME;
         skipped_mv[1].top.field_select = PIPE_VIDEO_FRAME;

         skipped_mv[0].bottom = skipped_mv[0].top;
         skipped_mv[1].bottom = skipped_mv[1].top;

         ++mb_addr;
         for (i = 0; i < mb->num_skipped_macroblocks; ++i, ++mb_addr) {
            for (j = 0; j < 2; ++j) {
               if (!dec->ref_frames[j][0]) continue;
               buf->mv_stream[j][mb_addr] = skipped_mv[j];
            }
         }
      }

      ++mb;
   }
}

static void
vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
                           unsigned num_buffers,
                           const void * const *buffers,
                           const unsigned *sizes)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct vl_mpeg12_buffer *buf;

   unsigned i;

   assert(dec && dec->current_buffer);

   buf = dec->current_buffer;
   assert(buf);

   for (i = 0; i < VL_MAX_PLANES; ++i)
      vl_zscan_set_layout(&buf->zscan[i], dec->picture_desc.alternate_scan ?
                          dec->zscan_alternate : dec->zscan_normal);

   vl_mpg12_bs_decode(&buf->bs, num_buffers, buffers, sizes);
}

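/*
 * Flush everything queued up for the current frame: unmap the buffers, run
 * motion compensation against the reference frames, then zscan (and IDCT if
 * enabled) and finally blend the Y'CbCr blocks into the target surfaces.
 */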
static void
vl_mpeg12_end_frame(struct pipe_video_decoder *decoder)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_sampler_view **mc_source_sv;
   struct pipe_vertex_buffer vb[3];
   struct vl_mpeg12_buffer *buf;

   unsigned i, j, component;
   unsigned nr_components;

   assert(dec && dec->current_buffer);

   buf = dec->current_buffer;

   vl_vb_unmap(&buf->vertex_stream, dec->base.context);

   dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer);
   dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer);

   vb[0] = dec->quads;
   vb[1] = dec->pos;

   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
   for (i = 0; i < VL_MAX_PLANES; ++i) {
      if (!dec->target_surfaces[i]) continue;

      vl_mc_set_surface(&buf->mc[i], dec->target_surfaces[i]);

      for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
         if (!dec->ref_frames[j][i]) continue;

         vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
         dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);

         vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], dec->ref_frames[j][i]);
      }
   }

   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
   for (i = 0; i < VL_MAX_PLANES; ++i) {
      if (!buf->num_ycbcr_blocks[i]) continue;

      vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
      dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);

      vl_zscan_render(i ? &dec->zscan_c : &dec->zscan_y, &buf->zscan[i], buf->num_ycbcr_blocks[i]);

      if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
         vl_idct_flush(i ? &dec->idct_c : &dec->idct_y, &buf->idct[i], buf->num_ycbcr_blocks[i]);
   }

   mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
   for (i = 0, component = 0; i < VL_MAX_PLANES; ++i) {
      if (!dec->target_surfaces[i]) continue;

      nr_components = util_format_get_nr_components(dec->target_surfaces[i]->texture->format);
      for (j = 0; j < nr_components; ++j, ++component) {
         if (!buf->num_ycbcr_blocks[i]) continue;

         vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component);
         dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);

         if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
            vl_idct_prepare_stage2(i ? &dec->idct_c : &dec->idct_y, &buf->idct[component]);
         else {
            dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]);
            dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
         }
         vl_mc_render_ycbcr(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], j, buf->num_ycbcr_blocks[component]);
      }
   }
}

static void
vl_mpeg12_flush(struct pipe_video_decoder *decoder)
{
   assert(decoder);

   // No-op: for shaders it is much faster to flush everything in end_frame
}

static bool
init_pipe_state(struct vl_mpeg12_decoder *dec)
{
   struct pipe_depth_stencil_alpha_state dsa;
   struct pipe_sampler_state sampler;
   unsigned i;

   assert(dec);

   memset(&dsa, 0, sizeof dsa);
   dsa.depth.enabled = 0;
   dsa.depth.writemask = 0;
   dsa.depth.func = PIPE_FUNC_ALWAYS;
   for (i = 0; i < 2; ++i) {
      dsa.stencil[i].enabled = 0;
      dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
      dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].valuemask = 0;
      dsa.stencil[i].writemask = 0;
   }
   dsa.alpha.enabled = 0;
   dsa.alpha.func = PIPE_FUNC_ALWAYS;
   dsa.alpha.ref_value = 0;
   dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa);
   dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa);

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;
   dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler);
   if (!dec->sampler_ycbcr)
      return false;

   return true;
}

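/* Pick the first format configuration whose formats the screen supports. */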
static const struct format_config*
find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config configs[], unsigned num_configs)
{
   struct pipe_screen *screen;
   unsigned i;

   assert(dec);

   screen = dec->base.context->screen;

   for (i = 0; i < num_configs; ++i) {
      if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
                                       1, PIPE_BIND_SAMPLER_VIEW))
         continue;

      if (configs[i].idct_source_format != PIPE_FORMAT_NONE) {
         if (!screen->is_format_supported(screen, configs[i].idct_source_format, PIPE_TEXTURE_2D,
                                          1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
            continue;

         if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_3D,
                                          1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
            continue;
      } else {
         if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_2D,
                                          1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
            continue;
      }
      return &configs[i];
   }

   return NULL;
}

static bool
init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
   unsigned num_channels;

   assert(dec);

   dec->zscan_source_format = format_config->zscan_source_format;
   dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, dec->blocks_per_line);
   dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line);
   dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line);

   num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;

   if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height,
                      dec->blocks_per_line, dec->num_blocks, num_channels))
      return false;

   if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height,
                      dec->blocks_per_line, dec->num_blocks, num_channels))
      return false;

   return true;
}

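/*
 * Set up the two-stage IDCT: the number of simultaneous render targets is
 * chosen from the screen caps and the intermediate buffers are sized
 * accordingly.
 */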
static bool
init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
   unsigned nr_of_idct_render_targets, max_inst;
   enum pipe_format formats[3];

   struct pipe_sampler_view *matrix = NULL;

   nr_of_idct_render_targets = dec->base.context->screen->get_param
   (
      dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS
   );

   max_inst = dec->base.context->screen->get_shader_param
   (
      dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
   );

   // Just assume we need 32 instructions per render target; not 100% accurate, but should work in most cases
   if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
      // more than 4 render targets usually doesn't make any sense
      nr_of_idct_render_targets = 4;
   else
      nr_of_idct_render_targets = 1;

   formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
   dec->idct_source = vl_video_buffer_create_ex
   (
      dec->base.context, dec->base.width / 4, dec->base.height, 1,
      dec->base.chroma_format, formats, PIPE_USAGE_STATIC
   );

   if (!dec->idct_source)
      goto error_idct_source;

   formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
   dec->mc_source = vl_video_buffer_create_ex
   (
      dec->base.context, dec->base.width / nr_of_idct_render_targets,
      dec->base.height / 4, nr_of_idct_render_targets,
      dec->base.chroma_format, formats, PIPE_USAGE_STATIC
   );

   if (!dec->mc_source)
      goto error_mc_source;

   if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale)))
      goto error_matrix;

   if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height,
                     nr_of_idct_render_targets, matrix, matrix))
      goto error_y;

   if (!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height,
                     nr_of_idct_render_targets, matrix, matrix))
      goto error_c;

   pipe_sampler_view_reference(&matrix, NULL);

   return true;

error_c:
   vl_idct_cleanup(&dec->idct_y);

error_y:
   pipe_sampler_view_reference(&matrix, NULL);

error_matrix:
   dec->mc_source->destroy(dec->mc_source);

error_mc_source:
   dec->idct_source->destroy(dec->idct_source);

error_idct_source:
   return false;
}

static bool
init_mc_source_without_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
   enum pipe_format formats[3];

   formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
   dec->mc_source = vl_video_buffer_create_ex
   (
      dec->base.context, dec->base.width, dec->base.height, 1,
      dec->base.chroma_format, formats, PIPE_USAGE_STATIC
   );

   return dec->mc_source != NULL;
}

static void
mc_vert_shader_callback(void *priv, struct vl_mc *mc,
                        struct ureg_program *shader,
                        unsigned first_output,
                        struct ureg_dst tex)
{
   struct vl_mpeg12_decoder *dec = priv;
   struct ureg_dst o_vtex;

   assert(priv && mc);
   assert(shader);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
      vl_idct_stage2_vert_shader(idct, shader, first_output, tex);
   } else {
      o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output);
      ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_XY), ureg_src(tex));
   }
}

static void
mc_frag_shader_callback(void *priv, struct vl_mc *mc,
                        struct ureg_program *shader,
                        unsigned first_input,
                        struct ureg_dst dst)
{
   struct vl_mpeg12_decoder *dec = priv;
   struct ureg_src src, sampler;

   assert(priv && mc);
   assert(shader);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
      vl_idct_stage2_frag_shader(idct, shader, first_input, dst);
   } else {
      src = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input, TGSI_INTERPOLATE_LINEAR);
      sampler = ureg_DECL_sampler(shader, 0);
      ureg_TEX(shader, dst, TGSI_TEXTURE_2D, src, sampler);
   }
}

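/*
 * Create a shader-based MPEG-1/2 decoder. Depending on the requested
 * entrypoint, the pipeline consists of bitstream parsing, zscan/inverse
 * quantization, IDCT and motion compensation stages.
 */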
struct pipe_video_decoder *
vl_create_mpeg12_decoder(struct pipe_context *context,
                         enum pipe_video_profile profile,
                         enum pipe_video_entrypoint entrypoint,
                         enum pipe_video_chroma_format chroma_format,
                         unsigned width, unsigned height, unsigned max_references)
{
   const unsigned block_size_pixels = BLOCK_WIDTH * BLOCK_HEIGHT;
   const struct format_config *format_config;
   struct vl_mpeg12_decoder *dec;

   assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);

   dec = CALLOC_STRUCT(vl_mpeg12_decoder);

   if (!dec)
      return NULL;

   dec->base.context = context;
   dec->base.profile = profile;
   dec->base.entrypoint = entrypoint;
   dec->base.chroma_format = chroma_format;
   dec->base.width = width;
   dec->base.height = height;
   dec->base.max_references = max_references;

   dec->base.destroy = vl_mpeg12_destroy;
   dec->base.create_buffer = vl_mpeg12_create_buffer;
   dec->base.destroy_buffer = vl_mpeg12_destroy_buffer;
   dec->base.set_decode_buffer = vl_mpeg12_set_decode_buffer;
   dec->base.set_picture_parameters = vl_mpeg12_set_picture_parameters;
   dec->base.set_quant_matrix = vl_mpeg12_set_quant_matrix;
   dec->base.set_decode_target = vl_mpeg12_set_decode_target;
   dec->base.set_reference_frames = vl_mpeg12_set_reference_frames;
   dec->base.begin_frame = vl_mpeg12_begin_frame;
   dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
   dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
   dec->base.end_frame = vl_mpeg12_end_frame;
   dec->base.flush = vl_mpeg12_flush;

   dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
   dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
   dec->width_in_macroblocks = align(dec->base.width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;

   /* TODO: Implement 422, 444 */
   assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);

   if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
      dec->chroma_width = dec->base.width / 2;
      dec->chroma_height = dec->base.height / 2;
      dec->num_blocks = dec->num_blocks * 2;
   } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
      dec->chroma_width = dec->base.width;
      dec->chroma_height = dec->base.height / 2;
      dec->num_blocks = dec->num_blocks * 2 + dec->num_blocks;
   } else {
      dec->chroma_width = dec->base.width;
      dec->chroma_height = dec->base.height;
      dec->num_blocks = dec->num_blocks * 3;
   }

   dec->quads = vl_vb_upload_quads(dec->base.context);
   dec->pos = vl_vb_upload_pos(
      dec->base.context,
      dec->base.width / MACROBLOCK_WIDTH,
      dec->base.height / MACROBLOCK_HEIGHT
   );

   dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
   dec->ves_mv = vl_vb_get_ves_mv(dec->base.context);

   switch (entrypoint) {
   case PIPE_VIDEO_ENTRYPOINT_BITSTREAM:
      format_config = find_format_config(dec, bitstream_format_config, num_bitstream_format_configs);
      break;

   case PIPE_VIDEO_ENTRYPOINT_IDCT:
      format_config = find_format_config(dec, idct_format_config, num_idct_format_configs);
      break;

   case PIPE_VIDEO_ENTRYPOINT_MC:
      format_config = find_format_config(dec, mc_format_config, num_mc_format_configs);
      break;

   default:
      assert(0);
      FREE(dec);
      return NULL;
   }

   if (!format_config) {
      FREE(dec);
      return NULL;
   }

   if (!init_zscan(dec, format_config))
      goto error_zscan;

   if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      if (!init_idct(dec, format_config))
         goto error_sources;
   } else {
      if (!init_mc_source_without_idct(dec, format_config))
         goto error_sources;
   }

   if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
                   MACROBLOCK_HEIGHT, format_config->mc_scale,
                   mc_vert_shader_callback, mc_frag_shader_callback, dec))
      goto error_mc_y;

   // TODO
   if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
                   BLOCK_HEIGHT, format_config->mc_scale,
                   mc_vert_shader_callback, mc_frag_shader_callback, dec))
      goto error_mc_c;

   if (!init_pipe_state(dec))
      goto error_pipe_state;

   memset(dec->intra_matrix, 0x10, 64);
   memset(dec->non_intra_matrix, 0x10, 64);

   return &dec->base;

error_pipe_state:
   vl_mc_cleanup(&dec->mc_c);

error_mc_c:
   vl_mc_cleanup(&dec->mc_y);

error_mc_y:
   if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      vl_idct_cleanup(&dec->idct_y);
      vl_idct_cleanup(&dec->idct_c);
      dec->idct_source->destroy(dec->idct_source);
   }
   dec->mc_source->destroy(dec->mc_source);

error_sources:
   vl_zscan_cleanup(&dec->zscan_y);
   vl_zscan_cleanup(&dec->zscan_c);

error_zscan:
   FREE(dec);
   return NULL;
}