/* vl_mpeg12_decoder.c revision 31096e13f858daf896c0c53077fb25e92da089a6 */
17faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez/************************************************************************** 27faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * 37faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * Copyright 2009 Younes Manton. 47faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * All Rights Reserved. 57faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * 67faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * Permission is hereby granted, free of charge, to any person obtaining a 77faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * copy of this software and associated documentation files (the 87faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * "Software"), to deal in the Software without restriction, including 97faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * without limitation the rights to use, copy, modify, merge, publish, 107faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * distribute, sub license, and/or sell copies of the Software, and to 117faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * permit persons to whom the Software is furnished to do so, subject to 127faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * the following conditions: 137faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * 147faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * The above copyright notice and this permission notice (including the 157faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * next paragraph) shall be included in all copies or substantial portions 167faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * of the Software. 
177faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * 187faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 197faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 207faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 217faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 227faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 237faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 247faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 257faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez * 267faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez **************************************************************************/ 277faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 287faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#include <math.h> 297faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#include <assert.h> 307faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 317faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#include <util/u_memory.h> 327faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#include <util/u_rect.h> 337faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#include <util/u_sampler.h> 347faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#include <util/u_video.h> 357faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 367faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#include "vl_mpeg12_decoder.h" 377faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#include "vl_defines.h" 
387faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 397faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#define SCALE_FACTOR_SNORM (32768.0f / 256.0f) 407faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez#define SCALE_FACTOR_SSCALED (1.0f / 256.0f) 417faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 427faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandezstruct format_config { 437faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez enum pipe_format zscan_source_format; 447faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez enum pipe_format idct_source_format; 457faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez enum pipe_format mc_source_format; 467faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 477faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez float idct_scale; 487faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez float mc_scale; 497faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez}; 507faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 517faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandezstatic const struct format_config bitstream_format_config[] = { 527faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED }, 537faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED }, 547faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM }, 557faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM } 567faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez}; 577faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 
587faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandezstatic const unsigned num_bitstream_format_configs = 597faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez sizeof(bitstream_format_config) / sizeof(struct format_config); 607faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 617faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandezstatic const struct format_config idct_format_config[] = { 627faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED }, 637faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED }, 647faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM }, 657faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM } 667faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez}; 677faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 687faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandezstatic const unsigned num_idct_format_configs = 697faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez sizeof(idct_format_config) / sizeof(struct format_config); 707faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 717faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandezstatic const struct format_config mc_format_config[] = { 727faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez //{ PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SSCALED, 0.0f, SCALE_FACTOR_SSCALED }, 737faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SNORM, 0.0f, SCALE_FACTOR_SNORM } 
747faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez}; 757faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 767faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandezstatic const unsigned num_mc_format_configs = 777faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez sizeof(mc_format_config) / sizeof(struct format_config); 787faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez 797faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandezstatic const unsigned const_empty_block_mask_420[3][2][2] = { 807faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { { 0x20, 0x10 }, { 0x08, 0x04 } }, 817faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { { 0x02, 0x02 }, { 0x02, 0x02 } }, 827faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez { { 0x01, 0x01 }, { 0x01, 0x01 } } 837faaa9f3f0df9d23790277834d426c3d992ac3baCarlos Hernandez}; 84 85static bool 86init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer) 87{ 88 struct pipe_resource *res, res_tmpl; 89 struct pipe_sampler_view sv_tmpl; 90 struct pipe_surface **destination; 91 92 unsigned i; 93 94 assert(dec && buffer); 95 96 memset(&res_tmpl, 0, sizeof(res_tmpl)); 97 res_tmpl.target = PIPE_TEXTURE_2D; 98 res_tmpl.format = dec->zscan_source_format; 99 res_tmpl.width0 = dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT; 100 res_tmpl.height0 = align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line; 101 res_tmpl.depth0 = 1; 102 res_tmpl.array_size = 1; 103 res_tmpl.usage = PIPE_USAGE_STREAM; 104 res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW; 105 106 res = dec->base.context->screen->resource_create(dec->base.context->screen, &res_tmpl); 107 if (!res) 108 goto error_source; 109 110 111 memset(&sv_tmpl, 0, sizeof(sv_tmpl)); 112 u_sampler_view_default_template(&sv_tmpl, res, res->format); 113 sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = PIPE_SWIZZLE_RED; 114 buffer->zscan_source = dec->base.context->create_sampler_view(dec->base.context, 
res, &sv_tmpl); 115 pipe_resource_reference(&res, NULL); 116 if (!buffer->zscan_source) 117 goto error_sampler; 118 119 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) 120 destination = dec->idct_source->get_surfaces(dec->idct_source); 121 else 122 destination = dec->mc_source->get_surfaces(dec->mc_source); 123 124 if (!destination) 125 goto error_surface; 126 127 for (i = 0; i < VL_MAX_PLANES; ++i) 128 if (!vl_zscan_init_buffer(i == 0 ? &dec->zscan_y : &dec->zscan_c, 129 &buffer->zscan[i], buffer->zscan_source, destination[i])) 130 goto error_plane; 131 132 return true; 133 134error_plane: 135 for (; i > 0; --i) 136 vl_zscan_cleanup_buffer(&buffer->zscan[i - 1]); 137 138error_surface: 139error_sampler: 140 pipe_sampler_view_reference(&buffer->zscan_source, NULL); 141 142error_source: 143 return false; 144} 145 146static void 147cleanup_zscan_buffer(struct vl_mpeg12_buffer *buffer) 148{ 149 unsigned i; 150 151 assert(buffer); 152 153 for (i = 0; i < VL_MAX_PLANES; ++i) 154 vl_zscan_cleanup_buffer(&buffer->zscan[i]); 155 156 pipe_sampler_view_reference(&buffer->zscan_source, NULL); 157} 158 159static bool 160init_idct_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer) 161{ 162 struct pipe_sampler_view **idct_source_sv, **mc_source_sv; 163 164 unsigned i; 165 166 assert(dec && buffer); 167 168 idct_source_sv = dec->idct_source->get_sampler_view_planes(dec->idct_source); 169 if (!idct_source_sv) 170 goto error_source_sv; 171 172 mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source); 173 if (!mc_source_sv) 174 goto error_mc_source_sv; 175 176 for (i = 0; i < 3; ++i) 177 if (!vl_idct_init_buffer(i == 0 ? 
&dec->idct_y : &dec->idct_c, 178 &buffer->idct[i], idct_source_sv[i], 179 mc_source_sv[i])) 180 goto error_plane; 181 182 return true; 183 184error_plane: 185 for (; i > 0; --i) 186 vl_idct_cleanup_buffer(&buffer->idct[i - 1]); 187 188error_mc_source_sv: 189error_source_sv: 190 return false; 191} 192 193static void 194cleanup_idct_buffer(struct vl_mpeg12_buffer *buf) 195{ 196 unsigned i; 197 198 assert(buf); 199 200 for (i = 0; i < 3; ++i) 201 vl_idct_cleanup_buffer(&buf->idct[0]); 202} 203 204static bool 205init_mc_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buf) 206{ 207 assert(dec && buf); 208 209 if(!vl_mc_init_buffer(&dec->mc_y, &buf->mc[0])) 210 goto error_mc_y; 211 212 if(!vl_mc_init_buffer(&dec->mc_c, &buf->mc[1])) 213 goto error_mc_cb; 214 215 if(!vl_mc_init_buffer(&dec->mc_c, &buf->mc[2])) 216 goto error_mc_cr; 217 218 return true; 219 220error_mc_cr: 221 vl_mc_cleanup_buffer(&buf->mc[1]); 222 223error_mc_cb: 224 vl_mc_cleanup_buffer(&buf->mc[0]); 225 226error_mc_y: 227 return false; 228} 229 230static void 231cleanup_mc_buffer(struct vl_mpeg12_buffer *buf) 232{ 233 unsigned i; 234 235 assert(buf); 236 237 for (i = 0; i < VL_MAX_PLANES; ++i) 238 vl_mc_cleanup_buffer(&buf->mc[i]); 239} 240 241static inline void 242MacroBlockTypeToPipeWeights(const struct pipe_mpeg12_macroblock *mb, unsigned weights[2]) 243{ 244 assert(mb); 245 246 switch (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) { 247 case PIPE_MPEG12_MB_TYPE_MOTION_FORWARD: 248 weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX; 249 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN; 250 break; 251 252 case (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD): 253 weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF; 254 weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF; 255 break; 256 257 case PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD: 258 weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN; 259 weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX; 260 break; 261 262 default: 263 if 
(mb->macroblock_type & PIPE_MPEG12_MB_TYPE_PATTERN) { 264 /* patern without a motion vector, just copy the old frame content */ 265 weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX; 266 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN; 267 } else { 268 weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN; 269 weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN; 270 } 271 break; 272 } 273} 274 275static inline struct vl_motionvector 276MotionVectorToPipe(const struct pipe_mpeg12_macroblock *mb, unsigned vector, 277 unsigned field_select_mask, unsigned weight) 278{ 279 struct vl_motionvector mv; 280 281 assert(mb); 282 283 if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) { 284 switch (mb->macroblock_modes.bits.frame_motion_type) { 285 case PIPE_MPEG12_MO_TYPE_FRAME: 286 mv.top.x = mb->PMV[0][vector][0]; 287 mv.top.y = mb->PMV[0][vector][1]; 288 mv.top.field_select = PIPE_VIDEO_FRAME; 289 mv.top.weight = weight; 290 291 mv.bottom.x = mb->PMV[0][vector][0]; 292 mv.bottom.y = mb->PMV[0][vector][1]; 293 mv.bottom.weight = weight; 294 mv.bottom.field_select = PIPE_VIDEO_FRAME; 295 break; 296 297 case PIPE_MPEG12_MO_TYPE_FIELD: 298 mv.top.x = mb->PMV[0][vector][0]; 299 mv.top.y = mb->PMV[0][vector][1]; 300 mv.top.field_select = (mb->motion_vertical_field_select & field_select_mask) ? 301 PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD; 302 mv.top.weight = weight; 303 304 mv.bottom.x = mb->PMV[1][vector][0]; 305 mv.bottom.y = mb->PMV[1][vector][1]; 306 mv.bottom.field_select = (mb->motion_vertical_field_select & (field_select_mask << 2)) ? 
307 PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD; 308 mv.bottom.weight = weight; 309 break; 310 311 default: // TODO: Support DUALPRIME and 16x8 312 break; 313 } 314 } else { 315 mv.top.x = mv.top.y = 0; 316 mv.top.field_select = PIPE_VIDEO_FRAME; 317 mv.top.weight = weight; 318 319 mv.bottom.x = mv.bottom.y = 0; 320 mv.bottom.field_select = PIPE_VIDEO_FRAME; 321 mv.bottom.weight = weight; 322 } 323 return mv; 324} 325 326static inline void 327UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec, 328 struct vl_mpeg12_buffer *buf, 329 const struct pipe_mpeg12_macroblock *mb) 330{ 331 unsigned intra; 332 unsigned tb, x, y, num_blocks = 0; 333 334 assert(dec && buf); 335 assert(mb); 336 337 if (!mb->coded_block_pattern) 338 return; 339 340 intra = mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA ? 1 : 0; 341 342 for (y = 0; y < 2; ++y) { 343 for (x = 0; x < 2; ++x, ++tb) { 344 if (mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) { 345 346 struct vl_ycbcr_block *stream = buf->ycbcr_stream[0]; 347 stream->x = mb->x * 2 + x; 348 stream->y = mb->y * 2 + y; 349 stream->intra = intra; 350 stream->coding = mb->macroblock_modes.bits.dct_type; 351 stream->block_num = buf->block_num++; 352 353 buf->num_ycbcr_blocks[0]++; 354 buf->ycbcr_stream[0]++; 355 356 num_blocks++; 357 } 358 } 359 } 360 361 /* TODO: Implement 422, 444 */ 362 //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420); 363 364 for (tb = 1; tb < 3; ++tb) { 365 if (mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) { 366 367 struct vl_ycbcr_block *stream = buf->ycbcr_stream[tb]; 368 stream->x = mb->x; 369 stream->y = mb->y; 370 stream->intra = intra; 371 stream->coding = 0; 372 stream->block_num = buf->block_num++; 373 374 buf->num_ycbcr_blocks[tb]++; 375 buf->ycbcr_stream[tb]++; 376 377 num_blocks++; 378 } 379 } 380 381 memcpy(buf->texels, mb->blocks, 64 * sizeof(short) * num_blocks); 382 buf->texels += 64 * num_blocks; 383} 384 385static void 386vl_mpeg12_destroy(struct 
pipe_video_decoder *decoder) 387{ 388 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder; 389 390 assert(decoder); 391 392 /* Asserted in softpipe_delete_fs_state() for some reason */ 393 dec->base.context->bind_vs_state(dec->base.context, NULL); 394 dec->base.context->bind_fs_state(dec->base.context, NULL); 395 396 dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa); 397 dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr); 398 399 vl_mc_cleanup(&dec->mc_y); 400 vl_mc_cleanup(&dec->mc_c); 401 dec->mc_source->destroy(dec->mc_source); 402 403 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) { 404 vl_idct_cleanup(&dec->idct_y); 405 vl_idct_cleanup(&dec->idct_c); 406 dec->idct_source->destroy(dec->idct_source); 407 } 408 409 vl_zscan_cleanup(&dec->zscan_y); 410 vl_zscan_cleanup(&dec->zscan_c); 411 412 dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr); 413 dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv); 414 415 pipe_resource_reference(&dec->quads.buffer, NULL); 416 pipe_resource_reference(&dec->pos.buffer, NULL); 417 418 pipe_sampler_view_reference(&dec->zscan_linear, NULL); 419 pipe_sampler_view_reference(&dec->zscan_normal, NULL); 420 pipe_sampler_view_reference(&dec->zscan_alternate, NULL); 421 422 FREE(dec); 423} 424 425static void * 426vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder) 427{ 428 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder; 429 struct vl_mpeg12_buffer *buffer; 430 431 assert(dec); 432 433 buffer = CALLOC_STRUCT(vl_mpeg12_buffer); 434 if (buffer == NULL) 435 return NULL; 436 437 if (!vl_vb_init(&buffer->vertex_stream, dec->base.context, 438 dec->base.width / MACROBLOCK_WIDTH, 439 dec->base.height / MACROBLOCK_HEIGHT)) 440 goto error_vertex_buffer; 441 442 if (!init_mc_buffer(dec, buffer)) 443 goto error_mc; 444 445 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) 446 if 
(!init_idct_buffer(dec, buffer)) 447 goto error_idct; 448 449 if (!init_zscan_buffer(dec, buffer)) 450 goto error_zscan; 451 452 if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) 453 vl_mpg12_bs_init(&buffer->bs, 454 dec->base.width / MACROBLOCK_WIDTH, 455 dec->base.height / MACROBLOCK_HEIGHT); 456 457 return buffer; 458 459error_zscan: 460 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) 461 cleanup_idct_buffer(buffer); 462 463error_idct: 464 cleanup_mc_buffer(buffer); 465 466error_mc: 467 vl_vb_cleanup(&buffer->vertex_stream); 468 469error_vertex_buffer: 470 FREE(buffer); 471 return NULL; 472} 473 474static void 475vl_mpeg12_destroy_buffer(struct pipe_video_decoder *decoder, void *buffer) 476{ 477 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder; 478 struct vl_mpeg12_buffer *buf = buffer; 479 480 assert(dec && buf); 481 482 cleanup_zscan_buffer(buf); 483 484 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) 485 cleanup_idct_buffer(buf); 486 487 cleanup_mc_buffer(buf); 488 489 vl_vb_cleanup(&buf->vertex_stream); 490 491 FREE(buf); 492} 493 494static void 495vl_mpeg12_set_decode_buffer(struct pipe_video_decoder *decoder, void *buffer) 496{ 497 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; 498 499 assert(dec && buffer); 500 501 dec->current_buffer = buffer; 502} 503 504static void 505vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder, 506 struct pipe_picture_desc *picture) 507{ 508 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; 509 struct pipe_mpeg12_picture_desc *pic = (struct pipe_mpeg12_picture_desc *)picture; 510 511 assert(dec && pic); 512 513 dec->picture_desc = *pic; 514} 515 516static void 517vl_mpeg12_set_quant_matrix(struct pipe_video_decoder *decoder, 518 const struct pipe_quant_matrix *matrix) 519{ 520 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; 521 const struct pipe_mpeg12_quant_matrix *m = (const struct 
pipe_mpeg12_quant_matrix *)matrix; 522 523 assert(dec); 524 assert(matrix->codec == PIPE_VIDEO_CODEC_MPEG12); 525 526 memcpy(dec->intra_matrix, m->intra_matrix, 64); 527 memcpy(dec->non_intra_matrix, m->non_intra_matrix, 64); 528} 529 530static void 531vl_mpeg12_set_decode_target(struct pipe_video_decoder *decoder, 532 struct pipe_video_buffer *target) 533{ 534 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; 535 struct pipe_surface **surfaces; 536 unsigned i; 537 538 assert(dec); 539 540 surfaces = target->get_surfaces(target); 541 for (i = 0; i < VL_MAX_PLANES; ++i) 542 pipe_surface_reference(&dec->target_surfaces[i], surfaces[i]); 543} 544 545static void 546vl_mpeg12_set_reference_frames(struct pipe_video_decoder *decoder, 547 struct pipe_video_buffer **ref_frames, 548 unsigned num_ref_frames) 549{ 550 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; 551 struct pipe_sampler_view **sv; 552 unsigned i,j; 553 554 assert(dec); 555 assert(num_ref_frames <= VL_MAX_REF_FRAMES); 556 557 for (i = 0; i < num_ref_frames; ++i) { 558 sv = ref_frames[i]->get_sampler_view_planes(ref_frames[i]); 559 for (j = 0; j < VL_MAX_PLANES; ++j) 560 pipe_sampler_view_reference(&dec->ref_frames[i][j], sv[j]); 561 } 562 563 for (; i < VL_MAX_REF_FRAMES; ++i) 564 for (j = 0; j < VL_MAX_PLANES; ++j) 565 pipe_sampler_view_reference(&dec->ref_frames[i][j], NULL); 566} 567 568static void 569vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder) 570{ 571 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; 572 struct vl_mpeg12_buffer *buf; 573 574 struct pipe_resource *tex; 575 struct pipe_box rect = { 0, 0, 0, 1, 1, 1 }; 576 577 unsigned i; 578 579 assert(dec); 580 581 buf = dec->current_buffer; 582 assert(buf); 583 584 if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) 585 dec->intra_matrix[0] = 1 << (7 - dec->picture_desc.intra_dc_precision); 586 587 for (i = 0; i < VL_MAX_PLANES; ++i) { 588 
vl_zscan_upload_quant(&buf->zscan[i], dec->intra_matrix, true); 589 vl_zscan_upload_quant(&buf->zscan[i], dec->non_intra_matrix, false); 590 } 591 592 vl_vb_map(&buf->vertex_stream, dec->base.context); 593 594 tex = buf->zscan_source->texture; 595 rect.width = tex->width0; 596 rect.height = tex->height0; 597 598 buf->tex_transfer = dec->base.context->get_transfer 599 ( 600 dec->base.context, tex, 601 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD, 602 &rect 603 ); 604 605 buf->block_num = 0; 606 buf->texels = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer); 607 608 for (i = 0; i < VL_MAX_PLANES; ++i) { 609 buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i); 610 buf->num_ycbcr_blocks[i] = 0; 611 } 612 613 for (i = 0; i < VL_MAX_REF_FRAMES; ++i) 614 buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i); 615 616 if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) { 617 vl_mpg12_bs_set_buffers(&buf->bs, buf->ycbcr_stream, buf->texels, buf->mv_stream); 618 619 } else { 620 621 for (i = 0; i < VL_MAX_PLANES; ++i) 622 vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear); 623 } 624} 625 626static void 627vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder, 628 const struct pipe_macroblock *macroblocks, 629 unsigned num_macroblocks) 630{ 631 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; 632 const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks; 633 struct vl_mpeg12_buffer *buf; 634 635 unsigned i, j, mv_weights[2]; 636 637 assert(dec && dec->current_buffer); 638 assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12); 639 640 buf = dec->current_buffer; 641 assert(buf); 642 643 for (; num_macroblocks > 0; --num_macroblocks) { 644 unsigned mb_addr = mb->y * dec->width_in_macroblocks + mb->x; 645 646 if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_PATTERN | PIPE_MPEG12_MB_TYPE_INTRA)) 647 UploadYcbcrBlocks(dec, buf, mb); 648 649 
MacroBlockTypeToPipeWeights(mb, mv_weights); 650 651 for (i = 0; i < 2; ++i) { 652 if (!dec->ref_frames[i][0]) continue; 653 654 buf->mv_stream[i][mb_addr] = MotionVectorToPipe 655 ( 656 mb, i, 657 i ? PIPE_MPEG12_FS_FIRST_BACKWARD : PIPE_MPEG12_FS_FIRST_FORWARD, 658 mv_weights[i] 659 ); 660 } 661 662 /* see section 7.6.6 of the spec */ 663 if (mb->num_skipped_macroblocks > 0) { 664 struct vl_motionvector skipped_mv[2]; 665 666 if (dec->ref_frames[0][0] && !dec->ref_frames[1][0]) { 667 skipped_mv[0].top.x = skipped_mv[0].top.y = 0; 668 skipped_mv[0].top.weight = PIPE_VIDEO_MV_WEIGHT_MAX; 669 } else { 670 skipped_mv[0] = buf->mv_stream[0][mb_addr]; 671 skipped_mv[1] = buf->mv_stream[1][mb_addr]; 672 } 673 skipped_mv[0].top.field_select = PIPE_VIDEO_FRAME; 674 skipped_mv[1].top.field_select = PIPE_VIDEO_FRAME; 675 676 skipped_mv[0].bottom = skipped_mv[0].top; 677 skipped_mv[1].bottom = skipped_mv[1].top; 678 679 ++mb_addr; 680 for (i = 0; i < mb->num_skipped_macroblocks; ++i, ++mb_addr) { 681 for (j = 0; j < 2; ++j) { 682 if (!dec->ref_frames[j][0]) continue; 683 buf->mv_stream[j][mb_addr] = skipped_mv[j]; 684 685 } 686 } 687 } 688 689 ++mb; 690 } 691} 692 693static void 694vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder, 695 unsigned num_bytes, const void *data) 696{ 697 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; 698 struct vl_mpeg12_buffer *buf; 699 700 unsigned i; 701 702 assert(dec && dec->current_buffer); 703 704 buf = dec->current_buffer; 705 assert(buf); 706 707 for (i = 0; i < VL_MAX_PLANES; ++i) 708 vl_zscan_set_layout(&buf->zscan[i], dec->picture_desc.alternate_scan ? 
709 dec->zscan_alternate : dec->zscan_normal); 710 711 vl_mpg12_bs_decode(&buf->bs, num_bytes, data, &dec->picture_desc, buf->num_ycbcr_blocks); 712} 713 714static void 715vl_mpeg12_end_frame(struct pipe_video_decoder *decoder) 716{ 717 struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder; 718 struct pipe_sampler_view **mc_source_sv; 719 struct pipe_vertex_buffer vb[3]; 720 struct vl_mpeg12_buffer *buf; 721 722 unsigned i, j, component; 723 unsigned nr_components; 724 725 assert(dec && dec->current_buffer); 726 727 buf = dec->current_buffer; 728 729 vl_vb_unmap(&buf->vertex_stream, dec->base.context); 730 731 dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer); 732 dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer); 733 734 vb[0] = dec->quads; 735 vb[1] = dec->pos; 736 737 dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv); 738 for (i = 0; i < VL_MAX_PLANES; ++i) { 739 if (!dec->target_surfaces[i]) continue; 740 741 vl_mc_set_surface(&buf->mc[i], dec->target_surfaces[i]); 742 743 for (j = 0; j < VL_MAX_REF_FRAMES; ++j) { 744 if (!dec->ref_frames[j][i]) continue; 745 746 vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);; 747 dec->base.context->set_vertex_buffers(dec->base.context, 3, vb); 748 749 vl_mc_render_ref(&buf->mc[i], dec->ref_frames[j][i]); 750 } 751 } 752 753 dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr); 754 for (i = 0; i < VL_MAX_PLANES; ++i) { 755 if (!buf->num_ycbcr_blocks[i]) continue; 756 757 vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i); 758 dec->base.context->set_vertex_buffers(dec->base.context, 2, vb); 759 760 vl_zscan_render(&buf->zscan[i] , buf->num_ycbcr_blocks[i]); 761 762 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) 763 vl_idct_flush(&buf->idct[i], buf->num_ycbcr_blocks[i]); 764 } 765 766 mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source); 767 for (i = 0, component = 0; i < VL_MAX_PLANES; ++i) { 
768 if (!dec->target_surfaces[i]) continue; 769 770 nr_components = util_format_get_nr_components(dec->target_surfaces[i]->texture->format); 771 for (j = 0; j < nr_components; ++j, ++component) { 772 if (!buf->num_ycbcr_blocks[i]) continue; 773 774 vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component); 775 dec->base.context->set_vertex_buffers(dec->base.context, 2, vb); 776 777 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) 778 vl_idct_prepare_stage2(&buf->idct[component]); 779 else { 780 dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]); 781 dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr); 782 } 783 vl_mc_render_ycbcr(&buf->mc[i], j, buf->num_ycbcr_blocks[component]); 784 } 785 } 786} 787 788static void 789vl_mpeg12_flush(struct pipe_video_decoder *decoder) 790{ 791 assert(decoder); 792 793 //Noop, for shaders it is much faster to flush everything in end_frame 794} 795 796static bool 797init_pipe_state(struct vl_mpeg12_decoder *dec) 798{ 799 struct pipe_depth_stencil_alpha_state dsa; 800 struct pipe_sampler_state sampler; 801 unsigned i; 802 803 assert(dec); 804 805 memset(&dsa, 0, sizeof dsa); 806 dsa.depth.enabled = 0; 807 dsa.depth.writemask = 0; 808 dsa.depth.func = PIPE_FUNC_ALWAYS; 809 for (i = 0; i < 2; ++i) { 810 dsa.stencil[i].enabled = 0; 811 dsa.stencil[i].func = PIPE_FUNC_ALWAYS; 812 dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP; 813 dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP; 814 dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP; 815 dsa.stencil[i].valuemask = 0; 816 dsa.stencil[i].writemask = 0; 817 } 818 dsa.alpha.enabled = 0; 819 dsa.alpha.func = PIPE_FUNC_ALWAYS; 820 dsa.alpha.ref_value = 0; 821 dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa); 822 dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa); 823 824 memset(&sampler, 0, sizeof(sampler)); 825 sampler.wrap_s = 
PIPE_TEX_WRAP_CLAMP_TO_EDGE; 826 sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE; 827 sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER; 828 sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST; 829 sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE; 830 sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST; 831 sampler.compare_mode = PIPE_TEX_COMPARE_NONE; 832 sampler.compare_func = PIPE_FUNC_ALWAYS; 833 sampler.normalized_coords = 1; 834 dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler); 835 if (!dec->sampler_ycbcr) 836 return false; 837 838 return true; 839} 840 841static const struct format_config* 842find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config configs[], unsigned num_configs) 843{ 844 struct pipe_screen *screen; 845 unsigned i; 846 847 assert(dec); 848 849 screen = dec->base.context->screen; 850 851 for (i = 0; i < num_configs; ++i) { 852 if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D, 853 1, PIPE_BIND_SAMPLER_VIEW)) 854 continue; 855 856 if (configs[i].idct_source_format != PIPE_FORMAT_NONE) { 857 if (!screen->is_format_supported(screen, configs[i].idct_source_format, PIPE_TEXTURE_2D, 858 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET)) 859 continue; 860 861 if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_3D, 862 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET)) 863 continue; 864 } else { 865 if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_2D, 866 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET)) 867 continue; 868 } 869 return &configs[i]; 870 } 871 872 return NULL; 873} 874 875static bool 876init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_config) 877{ 878 unsigned num_channels; 879 880 assert(dec); 881 882 dec->zscan_source_format = format_config->zscan_source_format; 883 dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, 
dec->blocks_per_line); 884 dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line); 885 dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line); 886 887 num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1; 888 889 if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height, 890 dec->blocks_per_line, dec->num_blocks, num_channels)) 891 return false; 892 893 if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height, 894 dec->blocks_per_line, dec->num_blocks, num_channels)) 895 return false; 896 897 return true; 898} 899 900static bool 901init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config) 902{ 903 unsigned nr_of_idct_render_targets, max_inst; 904 enum pipe_format formats[3]; 905 906 struct pipe_sampler_view *matrix = NULL; 907 908 nr_of_idct_render_targets = dec->base.context->screen->get_param 909 ( 910 dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS 911 ); 912 913 max_inst = dec->base.context->screen->get_shader_param 914 ( 915 dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS 916 ); 917 918 // Just assume we need 32 inst per render target, not 100% true, but should work in most cases 919 if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4) 920 // more than 4 render targets usually doesn't makes any seens 921 nr_of_idct_render_targets = 4; 922 else 923 nr_of_idct_render_targets = 1; 924 925 formats[0] = formats[1] = formats[2] = format_config->idct_source_format; 926 dec->idct_source = vl_video_buffer_create_ex 927 ( 928 dec->base.context, dec->base.width / 4, dec->base.height, 1, 929 dec->base.chroma_format, formats, PIPE_USAGE_STATIC 930 ); 931 932 if (!dec->idct_source) 933 goto error_idct_source; 934 935 formats[0] = formats[1] = formats[2] = format_config->mc_source_format; 936 dec->mc_source = vl_video_buffer_create_ex 937 ( 
938 dec->base.context, dec->base.width / nr_of_idct_render_targets, 939 dec->base.height / 4, nr_of_idct_render_targets, 940 dec->base.chroma_format, formats, PIPE_USAGE_STATIC 941 ); 942 943 if (!dec->mc_source) 944 goto error_mc_source; 945 946 if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale))) 947 goto error_matrix; 948 949 if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height, 950 nr_of_idct_render_targets, matrix, matrix)) 951 goto error_y; 952 953 if(!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height, 954 nr_of_idct_render_targets, matrix, matrix)) 955 goto error_c; 956 957 pipe_sampler_view_reference(&matrix, NULL); 958 959 return true; 960 961error_c: 962 vl_idct_cleanup(&dec->idct_y); 963 964error_y: 965 pipe_sampler_view_reference(&matrix, NULL); 966 967error_matrix: 968 dec->mc_source->destroy(dec->mc_source); 969 970error_mc_source: 971 dec->idct_source->destroy(dec->idct_source); 972 973error_idct_source: 974 return false; 975} 976 977static bool 978init_mc_source_widthout_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config) 979{ 980 enum pipe_format formats[3]; 981 982 formats[0] = formats[1] = formats[2] = format_config->mc_source_format; 983 dec->mc_source = vl_video_buffer_create_ex 984 ( 985 dec->base.context, dec->base.width, dec->base.height, 1, 986 dec->base.chroma_format, formats, PIPE_USAGE_STATIC 987 ); 988 989 return dec->mc_source != NULL; 990} 991 992static void 993mc_vert_shader_callback(void *priv, struct vl_mc *mc, 994 struct ureg_program *shader, 995 unsigned first_output, 996 struct ureg_dst tex) 997{ 998 struct vl_mpeg12_decoder *dec = priv; 999 struct ureg_dst o_vtex; 1000 1001 assert(priv && mc); 1002 assert(shader); 1003 1004 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) { 1005 struct vl_idct *idct = mc == &dec->mc_y ? 
&dec->idct_y : &dec->idct_c; 1006 vl_idct_stage2_vert_shader(idct, shader, first_output, tex); 1007 } else { 1008 o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output); 1009 ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_XY), ureg_src(tex)); 1010 } 1011} 1012 1013static void 1014mc_frag_shader_callback(void *priv, struct vl_mc *mc, 1015 struct ureg_program *shader, 1016 unsigned first_input, 1017 struct ureg_dst dst) 1018{ 1019 struct vl_mpeg12_decoder *dec = priv; 1020 struct ureg_src src, sampler; 1021 1022 assert(priv && mc); 1023 assert(shader); 1024 1025 if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) { 1026 struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c; 1027 vl_idct_stage2_frag_shader(idct, shader, first_input, dst); 1028 } else { 1029 src = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input, TGSI_INTERPOLATE_LINEAR); 1030 sampler = ureg_DECL_sampler(shader, 0); 1031 ureg_TEX(shader, dst, TGSI_TEXTURE_2D, src, sampler); 1032 } 1033} 1034 1035struct pipe_video_decoder * 1036vl_create_mpeg12_decoder(struct pipe_context *context, 1037 enum pipe_video_profile profile, 1038 enum pipe_video_entrypoint entrypoint, 1039 enum pipe_video_chroma_format chroma_format, 1040 unsigned width, unsigned height) 1041{ 1042 const unsigned block_size_pixels = BLOCK_WIDTH * BLOCK_HEIGHT; 1043 const struct format_config *format_config; 1044 struct vl_mpeg12_decoder *dec; 1045 1046 assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12); 1047 1048 dec = CALLOC_STRUCT(vl_mpeg12_decoder); 1049 1050 if (!dec) 1051 return NULL; 1052 1053 dec->base.context = context; 1054 dec->base.profile = profile; 1055 dec->base.entrypoint = entrypoint; 1056 dec->base.chroma_format = chroma_format; 1057 dec->base.width = width; 1058 dec->base.height = height; 1059 1060 dec->base.destroy = vl_mpeg12_destroy; 1061 dec->base.create_buffer = vl_mpeg12_create_buffer; 1062 dec->base.destroy_buffer = vl_mpeg12_destroy_buffer; 1063 
dec->base.set_decode_buffer = vl_mpeg12_set_decode_buffer; 1064 dec->base.set_picture_parameters = vl_mpeg12_set_picture_parameters; 1065 dec->base.set_quant_matrix = vl_mpeg12_set_quant_matrix; 1066 dec->base.set_decode_target = vl_mpeg12_set_decode_target; 1067 dec->base.set_reference_frames = vl_mpeg12_set_reference_frames; 1068 dec->base.begin_frame = vl_mpeg12_begin_frame; 1069 dec->base.decode_macroblock = vl_mpeg12_decode_macroblock; 1070 dec->base.decode_bitstream = vl_mpeg12_decode_bitstream; 1071 dec->base.end_frame = vl_mpeg12_end_frame; 1072 dec->base.flush = vl_mpeg12_flush; 1073 1074 dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4); 1075 dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels; 1076 dec->width_in_macroblocks = align(dec->base.width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH; 1077 1078 /* TODO: Implement 422, 444 */ 1079 assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420); 1080 1081 if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) { 1082 dec->chroma_width = dec->base.width / 2; 1083 dec->chroma_height = dec->base.height / 2; 1084 dec->num_blocks = dec->num_blocks * 2; 1085 } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) { 1086 dec->chroma_width = dec->base.width; 1087 dec->chroma_height = dec->base.height / 2; 1088 dec->num_blocks = dec->num_blocks * 2 + dec->num_blocks; 1089 } else { 1090 dec->chroma_width = dec->base.width; 1091 dec->chroma_height = dec->base.height; 1092 dec->num_blocks = dec->num_blocks * 3; 1093 } 1094 1095 dec->quads = vl_vb_upload_quads(dec->base.context); 1096 dec->pos = vl_vb_upload_pos( 1097 dec->base.context, 1098 dec->base.width / MACROBLOCK_WIDTH, 1099 dec->base.height / MACROBLOCK_HEIGHT 1100 ); 1101 1102 dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context); 1103 dec->ves_mv = vl_vb_get_ves_mv(dec->base.context); 1104 1105 switch (entrypoint) { 1106 case PIPE_VIDEO_ENTRYPOINT_BITSTREAM: 1107 
format_config = find_format_config(dec, bitstream_format_config, num_bitstream_format_configs); 1108 break; 1109 1110 case PIPE_VIDEO_ENTRYPOINT_IDCT: 1111 format_config = find_format_config(dec, idct_format_config, num_idct_format_configs); 1112 break; 1113 1114 case PIPE_VIDEO_ENTRYPOINT_MC: 1115 format_config = find_format_config(dec, mc_format_config, num_mc_format_configs); 1116 break; 1117 1118 default: 1119 assert(0); 1120 return NULL; 1121 } 1122 1123 if (!format_config) 1124 return NULL; 1125 1126 if (!init_zscan(dec, format_config)) 1127 goto error_zscan; 1128 1129 if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) { 1130 if (!init_idct(dec, format_config)) 1131 goto error_sources; 1132 } else { 1133 if (!init_mc_source_widthout_idct(dec, format_config)) 1134 goto error_sources; 1135 } 1136 1137 if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height, 1138 MACROBLOCK_HEIGHT, format_config->mc_scale, 1139 mc_vert_shader_callback, mc_frag_shader_callback, dec)) 1140 goto error_mc_y; 1141 1142 // TODO 1143 if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height, 1144 BLOCK_HEIGHT, format_config->mc_scale, 1145 mc_vert_shader_callback, mc_frag_shader_callback, dec)) 1146 goto error_mc_c; 1147 1148 if (!init_pipe_state(dec)) 1149 goto error_pipe_state; 1150 1151 memset(dec->intra_matrix, 0x10, 64); 1152 memset(dec->non_intra_matrix, 0x10, 64); 1153 1154 return &dec->base; 1155 1156error_pipe_state: 1157 vl_mc_cleanup(&dec->mc_c); 1158 1159error_mc_c: 1160 vl_mc_cleanup(&dec->mc_y); 1161 1162error_mc_y: 1163 if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) { 1164 vl_idct_cleanup(&dec->idct_y); 1165 vl_idct_cleanup(&dec->idct_c); 1166 dec->idct_source->destroy(dec->idct_source); 1167 } 1168 dec->mc_source->destroy(dec->mc_source); 1169 1170error_sources: 1171 vl_zscan_cleanup(&dec->zscan_y); 1172 vl_zscan_cleanup(&dec->zscan_c); 1173 1174error_zscan: 1175 FREE(dec); 1176 return NULL; 1177} 1178