/* M4VSS3GPP_EditVideo.c -- revision 7c9d8018755adf1857571125ba1b3598c96ea506 */
1/* 2 * Copyright (C) 2004-2011 NXP Software 3 * Copyright (C) 2011 The Android Open Source Project 4 * 5 * Licensed under the Apache License, Version 2.0 (the "License"); 6 * you may not use this file except in compliance with the License. 7 * You may obtain a copy of the License at 8 * 9 * http://www.apache.org/licenses/LICENSE-2.0 10 * 11 * Unless required by applicable law or agreed to in writing, software 12 * distributed under the License is distributed on an "AS IS" BASIS, 13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 * See the License for the specific language governing permissions and 15 * limitations under the License. 16 */ 17/** 18 ****************************************************************************** 19 * @file M4VSS3GPP_EditVideo.c 20 * @brief Video Studio Service 3GPP edit API implementation. 21 * @note 22 ****************************************************************************** 23 */ 24 25/****************/ 26/*** Includes ***/ 27/****************/ 28 29#include "NXPSW_CompilerSwitches.h" 30/** 31 * Our header */ 32#include "M4VSS3GPP_API.h" 33#include "M4VSS3GPP_InternalTypes.h" 34#include "M4VSS3GPP_InternalFunctions.h" 35#include "M4VSS3GPP_InternalConfig.h" 36#include "M4VSS3GPP_ErrorCodes.h" 37 38// StageFright encoders require %16 resolution 39#include "M4ENCODER_common.h" 40/** 41 * OSAL headers */ 42#include "M4OSA_Memory.h" /**< OSAL memory management */ 43#include "M4OSA_Debug.h" /**< OSAL debug management */ 44 45/** 46 * component includes */ 47#include "M4VFL_transition.h" /**< video effects */ 48 49/*for transition behaviour*/ 50#include <math.h> 51 52/************************************************************************/ 53/* Static local functions */ 54/************************************************************************/ 55 56static M4OSA_ERR M4VSS3GPP_intCheckVideoMode( 57 M4VSS3GPP_InternalEditContext *pC ); 58static M4OSA_Void 59M4VSS3GPP_intCheckVideoEffects( 
M4VSS3GPP_InternalEditContext *pC, 60 M4OSA_UInt8 uiClipNumber ); 61static M4OSA_ERR 62M4VSS3GPP_intApplyVideoEffect( M4VSS3GPP_InternalEditContext *pC,/*M4OSA_UInt8 uiClip1orClip2,*/ 63 M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut ); 64static M4OSA_ERR 65M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC, 66 M4VIFI_ImagePlane *pPlaneOut ); 67 68static M4OSA_Void 69M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC, 70 M4SYS_AccessUnit *pAU ); 71static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer, 72 M4OSA_UInt8 uiCts ); 73static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer, 74 M4OSA_UInt32 uiCtsSec ); 75static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer, 76 M4OSA_UInt32 *pCtsSec ); 77static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes, 78 M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight ); 79 80/** 81 ****************************************************************************** 82 * M4OSA_ERR M4VSS3GPP_intEditStepVideo() 83 * @brief One step of video processing 84 * @param pC (IN/OUT) Internal edit context 85 ****************************************************************************** 86 */ 87M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC ) 88{ 89 M4OSA_ERR err; 90 M4OSA_Int32 iCts, iNextCts; 91 M4ENCODER_FrameMode FrameMode; 92 M4OSA_Bool bSkipFrame; 93 M4OSA_UInt16 offset; 94 95 /** 96 * Check if we reached end cut */ 97 // Decorrelate input and output encoding timestamp to handle encoder prefetch 98 if ( ((M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset) >= pC->pC1->iEndTime ) 99 { 100 /* Re-adjust video to precise cut time */ 101 // Decorrelate input and output encoding timestamp to handle encoder prefetch 102 103 /** 104 * Video is done for this clip */ 105 err = M4VSS3GPP_intReachedEndOfVideo(pC); 106 107 /* RC: to know when a file has been processed */ 108 if (M4NO_ERROR != err && err != 
M4VSS3GPP_WAR_SWITCH_CLIP) 109 { 110 M4OSA_TRACE1_1( 111 "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intReachedEndOfVideo returns 0x%x", 112 err); 113 } 114 115 return err; 116 } 117 118 /* Don't change the states if we are in decodeUpTo() */ 119 if ( (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus) 120 && (( pC->pC2 == M4OSA_NULL) 121 || (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)) ) 122 { 123 /** 124 * Check Video Mode, depending on the current output CTS */ 125 err = M4VSS3GPP_intCheckVideoMode( 126 pC); /**< This function change the pC->Vstate variable! */ 127 128 if (M4NO_ERROR != err) 129 { 130 M4OSA_TRACE1_1( 131 "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intCheckVideoMode returns 0x%x!", 132 err); 133 return err; 134 } 135 } 136 137 138 switch( pC->Vstate ) 139 { 140 /* _________________ */ 141 /*| |*/ 142 /*| READ_WRITE MODE |*/ 143 /*|_________________|*/ 144 145 case M4VSS3GPP_kEditVideoState_READ_WRITE: 146 case M4VSS3GPP_kEditVideoState_AFTER_CUT: 147 { 148 M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo READ_WRITE"); 149 150 bSkipFrame = M4OSA_FALSE; 151 152 /** 153 * If we were decoding the clip, we must jump to be sure 154 * to get to the good position. 
*/ 155 if( M4VSS3GPP_kClipStatus_READ != pC->pC1->Vstatus ) 156 { 157 /** 158 * Jump to target video time (tc = to-T) */ 159 // Decorrelate input and output encoding timestamp to handle encoder prefetch 160 iCts = (M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset; 161 err = pC->pC1->ShellAPI.m_pReader->m_pFctJump( 162 pC->pC1->pReaderContext, 163 (M4_StreamHandler *)pC->pC1->pVideoStream, &iCts); 164 165 if( M4NO_ERROR != err ) 166 { 167 M4OSA_TRACE1_1( 168 "M4VSS3GPP_intEditStepVideo:\ 169 READ_WRITE: m_pReader->m_pFctJump(V1) returns 0x%x!", 170 err); 171 return err; 172 } 173 174 err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu( 175 pC->pC1->pReaderContext, 176 (M4_StreamHandler *)pC->pC1->pVideoStream, 177 &pC->pC1->VideoAU); 178 179 if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) ) 180 { 181 M4OSA_TRACE1_1( 182 "M4VSS3GPP_intEditStepVideo:\ 183 READ_WRITE: m_pReader->m_pFctGetNextAu returns 0x%x!", 184 err); 185 return err; 186 } 187 188 M4OSA_TRACE2_3("A .... read : cts = %.0f + %ld [ 0x%x ]", 189 pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset, 190 pC->pC1->VideoAU.m_size); 191 192 /* This frame has been already written in BEGIN CUT step -> skip it */ 193 if( pC->pC1->VideoAU.m_CTS == iCts 194 && pC->pC1->iVideoRenderCts >= iCts ) 195 { 196 bSkipFrame = M4OSA_TRUE; 197 } 198 } 199 200 /* This frame has been already written in BEGIN CUT step -> skip it */ 201 if( ( pC->Vstate == M4VSS3GPP_kEditVideoState_AFTER_CUT) 202 && (pC->pC1->VideoAU.m_CTS 203 + pC->pC1->iVoffset <= pC->ewc.WriterVideoAU.CTS) ) 204 { 205 bSkipFrame = M4OSA_TRUE; 206 } 207 208 /** 209 * Remember the clip reading state */ 210 pC->pC1->Vstatus = M4VSS3GPP_kClipStatus_READ; 211 // Decorrelate input and output encoding timestamp to handle encoder prefetch 212 // Rounding is to compensate reader imprecision (m_CTS is actually an integer) 213 iCts = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pC->pC1->iVoffset - 1; 214 iNextCts = iCts + ((M4OSA_Int32)pC->dOutputFrameDuration) + 1; 215 
/* Avoid to write a last frame of duration 0 */ 216 if( iNextCts > pC->pC1->iEndTime ) 217 iNextCts = pC->pC1->iEndTime; 218 219 /** 220 * If the AU is good to be written, write it, else just skip it */ 221 if( ( M4OSA_FALSE == bSkipFrame) 222 && (( pC->pC1->VideoAU.m_CTS >= iCts) 223 && (pC->pC1->VideoAU.m_CTS < iNextCts) 224 && (pC->pC1->VideoAU.m_size > 0)) ) 225 { 226 /** 227 * Get the output AU to write into */ 228 err = pC->ShellAPI.pWriterDataFcts->pStartAU( 229 pC->ewc.p3gpWriterContext, 230 M4VSS3GPP_WRITER_VIDEO_STREAM_ID, 231 &pC->ewc.WriterVideoAU); 232 233 if( M4NO_ERROR != err ) 234 { 235 M4OSA_TRACE1_1( 236 "M4VSS3GPP_intEditStepVideo: READ_WRITE:\ 237 pWriterDataFcts->pStartAU(Video) returns 0x%x!", 238 err); 239 return err; 240 } 241 242 /** 243 * Copy the input AU to the output AU */ 244 pC->ewc.WriterVideoAU.attribute = pC->pC1->VideoAU.m_attribute; 245 // Decorrelate input and output encoding timestamp to handle encoder prefetch 246 pC->ewc.WriterVideoAU.CTS = (M4OSA_Time)pC->pC1->VideoAU.m_CTS + 247 (M4OSA_Time)pC->pC1->iVoffset; 248 pC->ewc.dInputVidCts += pC->dOutputFrameDuration; 249 offset = 0; 250 /* for h.264 stream do not read the 1st 4 bytes as they are header 251 indicators */ 252 if( pC->pC1->pVideoStream->m_basicProperties.m_streamType 253 == M4DA_StreamTypeVideoMpeg4Avc ) 254 offset = 4; 255 256 pC->ewc.WriterVideoAU.size = pC->pC1->VideoAU.m_size - offset; 257 if( pC->ewc.WriterVideoAU.size > pC->ewc.uiVideoMaxAuSize ) 258 { 259 M4OSA_TRACE1_2( 260 "M4VSS3GPP_intEditStepVideo: READ_WRITE: AU size greater than\ 261 MaxAuSize (%d>%d)! 
returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE", 262 pC->ewc.WriterVideoAU.size, pC->ewc.uiVideoMaxAuSize); 263 return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE; 264 } 265 266 M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterVideoAU.dataAddress, 267 (pC->pC1->VideoAU.m_dataAddress + offset), 268 (pC->ewc.WriterVideoAU.size)); 269 270 /** 271 * Update time info for the Counter Time System to be equal to the bit 272 -stream time*/ 273 M4VSS3GPP_intUpdateTimeInfo(pC, &pC->ewc.WriterVideoAU); 274 M4OSA_TRACE2_2("B ---- write : cts = %lu [ 0x%x ]", 275 pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size); 276 277 /** 278 * Write the AU */ 279 err = pC->ShellAPI.pWriterDataFcts->pProcessAU( 280 pC->ewc.p3gpWriterContext, 281 M4VSS3GPP_WRITER_VIDEO_STREAM_ID, 282 &pC->ewc.WriterVideoAU); 283 284 if( M4NO_ERROR != err ) 285 { 286 /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output 287 file size is reached 288 The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE 289 is returned*/ 290 if( M4WAR_WRITER_STOP_REQ == err ) 291 { 292 M4OSA_TRACE1_0( 293 "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize"); 294 return M4VSS3GPP_WAR_EDITING_DONE; 295 } 296 else 297 { 298 M4OSA_TRACE1_1( 299 "M4VSS3GPP_intEditStepVideo: READ_WRITE:\ 300 pWriterDataFcts->pProcessAU(Video) returns 0x%x!", 301 err); 302 return err; 303 } 304 } 305 306 /** 307 * Read next AU for next step */ 308 err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu( 309 pC->pC1->pReaderContext, 310 (M4_StreamHandler *)pC->pC1->pVideoStream, 311 &pC->pC1->VideoAU); 312 313 if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) ) 314 { 315 M4OSA_TRACE1_1( 316 "M4VSS3GPP_intEditStepVideo: READ_WRITE:\ 317 m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!", 318 err); 319 return err; 320 } 321 322 M4OSA_TRACE2_3("C .... 
read : cts = %.0f + %ld [ 0x%x ]", 323 pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset, 324 pC->pC1->VideoAU.m_size); 325 } 326 else 327 { 328 /** 329 * Decide wether to read or to increment time increment */ 330 if( ( pC->pC1->VideoAU.m_size == 0) 331 || (pC->pC1->VideoAU.m_CTS >= iNextCts) ) 332 { 333 /*Increment time by the encoding period (NO_MORE_AU or reader in advance */ 334 // Decorrelate input and output encoding timestamp to handle encoder prefetch 335 pC->ewc.dInputVidCts += pC->dOutputFrameDuration; 336 337 /* Switch (from AFTER_CUT) to normal mode because time is 338 no more frozen */ 339 pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE; 340 } 341 else 342 { 343 /* In other cases (reader late), just let the reader catch up 344 pC->ewc.dVTo */ 345 err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu( 346 pC->pC1->pReaderContext, 347 (M4_StreamHandler *)pC->pC1->pVideoStream, 348 &pC->pC1->VideoAU); 349 350 if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) ) 351 { 352 M4OSA_TRACE1_1( 353 "M4VSS3GPP_intEditStepVideo: READ_WRITE:\ 354 m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!", 355 err); 356 return err; 357 } 358 359 M4OSA_TRACE2_3("D .... 
read : cts = %.0f + %ld [ 0x%x ]", 360 pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset, 361 pC->pC1->VideoAU.m_size); 362 } 363 } 364 } 365 break; 366 367 /* ____________________ */ 368 /*| |*/ 369 /*| DECODE_ENCODE MODE |*/ 370 /*| BEGIN_CUT MODE |*/ 371 /*|____________________|*/ 372 373 case M4VSS3GPP_kEditVideoState_DECODE_ENCODE: 374 case M4VSS3GPP_kEditVideoState_BEGIN_CUT: 375 { 376 M4OSA_TRACE3_0( 377 "M4VSS3GPP_intEditStepVideo DECODE_ENCODE / BEGIN_CUT"); 378 379 /** 380 * Decode the video up to the target time 381 (will jump to the previous RAP if needed ) */ 382 // Decorrelate input and output encoding timestamp to handle encoder prefetch 383 err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1, (M4OSA_Int32)pC->ewc.dInputVidCts); 384 if( M4NO_ERROR != err ) 385 { 386 M4OSA_TRACE1_1( 387 "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\ 388 M4VSS3GPP_intDecodeVideoUpToCts returns err=0x%x", 389 err); 390 return err; 391 } 392 393 /* If the decoding is not completed, do one more step with time frozen */ 394 if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus ) 395 { 396 return M4NO_ERROR; 397 } 398 399 /** 400 * Reset the video pre-processing error before calling the encoder */ 401 pC->ewc.VppError = M4NO_ERROR; 402 403 M4OSA_TRACE2_0("E ++++ encode AU"); 404 405 /** 406 * Encode the frame(rendering,filtering and writing will be done 407 in encoder callbacks)*/ 408 if( pC->Vstate == M4VSS3GPP_kEditVideoState_BEGIN_CUT ) 409 FrameMode = M4ENCODER_kIFrame; 410 else 411 FrameMode = M4ENCODER_kNormalFrame; 412 413 // Decorrelate input and output encoding timestamp to handle encoder prefetch 414 err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL, 415 pC->ewc.dInputVidCts, FrameMode); 416 /** 417 * Check if we had a VPP error... 
*/ 418 if( M4NO_ERROR != pC->ewc.VppError ) 419 { 420 M4OSA_TRACE1_1( 421 "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\ 422 pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x", 423 pC->ewc.VppError); 424#ifdef M4VSS_SUPPORT_OMX_CODECS 425 426 if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError ) 427 { 428#endif //M4VSS_SUPPORT_OMX_CODECS 429 430 return pC->ewc.VppError; 431#ifdef M4VSS_SUPPORT_OMX_CODECS 432 433 } 434 435#endif //M4VSS_SUPPORT_OMX_CODECS 436 437 } 438 else if( M4NO_ERROR != err ) /**< ...or an encoder error */ 439 { 440 if( ((M4OSA_UInt32)M4ERR_ALLOC) == err ) 441 { 442 M4OSA_TRACE1_0( 443 "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\ 444 returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR"); 445 return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR; 446 } 447 /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output 448 file size is reached 449 The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE 450 is returned*/ 451 else if( M4WAR_WRITER_STOP_REQ == err ) 452 { 453 M4OSA_TRACE1_0( 454 "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize"); 455 return M4VSS3GPP_WAR_EDITING_DONE; 456 } 457 else 458 { 459 M4OSA_TRACE1_1( 460 "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\ 461 pVideoEncoderGlobalFcts->pFctEncode returns 0x%x", 462 err); 463 return err; 464 } 465 } 466 467 /** 468 * Increment time by the encoding period (for begin cut, do not increment to not 469 loose P-frames) */ 470 if( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate ) 471 { 472 // Decorrelate input and output encoding timestamp to handle encoder prefetch 473 pC->ewc.dInputVidCts += pC->dOutputFrameDuration; 474 } 475 } 476 break; 477 478 /* _________________ */ 479 /*| |*/ 480 /*| TRANSITION MODE |*/ 481 /*|_________________|*/ 482 483 case M4VSS3GPP_kEditVideoState_TRANSITION: 484 { 485 M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo TRANSITION"); 486 487 /* Don't decode more than needed */ 488 if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != 
pC->pC1->Vstatus) 489 && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus)) ) 490 { 491 /** 492 * Decode the clip1 video up to the target time 493 (will jump to the previous RAP if needed */ 494 // Decorrelate input and output encoding timestamp to handle encoder prefetch 495 err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1, 496 (M4OSA_Int32)pC->ewc.dInputVidCts); 497 if( M4NO_ERROR != err ) 498 { 499 M4OSA_TRACE1_1( 500 "M4VSS3GPP_intEditStepVideo: TRANSITION:\ 501 M4VSS3GPP_intDecodeVideoUpToCts(C1) returns err=0x%x", 502 err); 503 return err; 504 } 505 506 /* If the decoding is not completed, do one more step with time frozen */ 507 if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus ) 508 { 509 return M4NO_ERROR; 510 } 511 } 512 513 /* Don't decode more than needed */ 514 if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus) 515 && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus)) ) 516 { 517 /** 518 * Decode the clip2 video up to the target time 519 (will jump to the previous RAP if needed) */ 520 // Decorrelate input and output encoding timestamp to handle encoder prefetch 521 err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC2, 522 (M4OSA_Int32)pC->ewc.dInputVidCts); 523 if( M4NO_ERROR != err ) 524 { 525 M4OSA_TRACE1_1( 526 "M4VSS3GPP_intEditStepVideo: TRANSITION:\ 527 M4VSS3GPP_intDecodeVideoUpToCts(C2) returns err=0x%x", 528 err); 529 return err; 530 } 531 532 /* If the decoding is not completed, do one more step with time frozen */ 533 if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus ) 534 { 535 return M4NO_ERROR; 536 } 537 } 538 539 /** 540 * Reset the video pre-processing error before calling the encoder */ 541 pC->ewc.VppError = M4NO_ERROR; 542 543 M4OSA_TRACE2_0("F **** blend AUs"); 544 545 /** 546 * Encode the frame (rendering, filtering and writing will be done 547 in encoder callbacks */ 548 // Decorrelate input and output encoding timestamp to handle encoder prefetch 549 err = 
pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL, 550 pC->ewc.dInputVidCts, M4ENCODER_kNormalFrame); 551 552 /** 553 * If encode returns a process frame error, it is likely to be a VPP error */ 554 if( M4NO_ERROR != pC->ewc.VppError ) 555 { 556 M4OSA_TRACE1_1( 557 "M4VSS3GPP_intEditStepVideo: TRANSITION:\ 558 pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x", 559 pC->ewc.VppError); 560#ifdef M4VSS_SUPPORT_OMX_CODECS 561 562 if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError ) 563 { 564 565#endif //M4VSS_SUPPORT_OMX_CODECS 566 567 return pC->ewc.VppError; 568#ifdef M4VSS_SUPPORT_OMX_CODECS 569 570 } 571 572#endif //M4VSS_SUPPORT_OMX_CODECS 573 574 } 575 else if( M4NO_ERROR != err ) /**< ...or an encoder error */ 576 { 577 if( ((M4OSA_UInt32)M4ERR_ALLOC) == err ) 578 { 579 M4OSA_TRACE1_0( 580 "M4VSS3GPP_intEditStepVideo: TRANSITION:\ 581 returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR"); 582 return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR; 583 } 584 585 /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output 586 file size is reached 587 The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE is 588 returned*/ 589 else if( M4WAR_WRITER_STOP_REQ == err ) 590 { 591 M4OSA_TRACE1_0( 592 "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize"); 593 return M4VSS3GPP_WAR_EDITING_DONE; 594 } 595 else 596 { 597 M4OSA_TRACE1_1( 598 "M4VSS3GPP_intEditStepVideo: TRANSITION:\ 599 pVideoEncoderGlobalFcts->pFctEncode returns 0x%x", 600 err); 601 return err; 602 } 603 } 604 605 /** 606 * Increment time by the encoding period */ 607 // Decorrelate input and output encoding timestamp to handle encoder prefetch 608 pC->ewc.dInputVidCts += pC->dOutputFrameDuration; 609 } 610 break; 611 612 /* ____________ */ 613 /*| |*/ 614 /*| ERROR CASE |*/ 615 /*|____________|*/ 616 617 default: 618 M4OSA_TRACE1_1( 619 "M4VSS3GPP_intEditStepVideo: invalid internal state (0x%x),\ 620 returning 
M4VSS3GPP_ERR_INTERNAL_STATE", 621 pC->Vstate); 622 return M4VSS3GPP_ERR_INTERNAL_STATE; 623 } 624 625 /** 626 * Return with no error */ 627 M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo: returning M4NO_ERROR"); 628 return M4NO_ERROR; 629} 630 631/** 632 ****************************************************************************** 633 * M4OSA_ERR M4VSS3GPP_intCheckVideoMode() 634 * @brief Check which video process mode we must use, depending on the output CTS. 635 * @param pC (IN/OUT) Internal edit context 636 ****************************************************************************** 637 */ 638static M4OSA_ERR M4VSS3GPP_intCheckVideoMode( 639 M4VSS3GPP_InternalEditContext *pC ) 640{ 641 M4OSA_ERR err; 642 // Decorrelate input and output encoding timestamp to handle encoder prefetch 643 const M4OSA_Int32 t = (M4OSA_Int32)pC->ewc.dInputVidCts; 644 /**< Transition duration */ 645 const M4OSA_Int32 TD = pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration; 646 647 M4OSA_Int32 iTmp; 648 649 const M4VSS3GPP_EditVideoState previousVstate = pC->Vstate; 650 651 /** 652 * Check if Clip1 is on its begin cut, or in an effect zone */ 653 M4VSS3GPP_intCheckVideoEffects(pC, 1); 654 655 /** 656 * Check if we are in the transition with next clip */ 657 if( ( TD > 0) && (( t - pC->pC1->iVoffset) >= (pC->pC1->iEndTime - TD)) ) 658 { 659 /** 660 * We are in a transition */ 661 pC->Vstate = M4VSS3GPP_kEditVideoState_TRANSITION; 662 pC->bTransitionEffect = M4OSA_TRUE; 663 664 /** 665 * Open second clip for transition, if not yet opened */ 666 if( M4OSA_NULL == pC->pC2 ) 667 { 668 err = M4VSS3GPP_intOpenClip(pC, &pC->pC2, 669 &pC->pClipList[pC->uiCurrentClip + 1]); 670 671 if( M4NO_ERROR != err ) 672 { 673 M4OSA_TRACE1_1( 674 "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_editOpenClip returns 0x%x!", 675 err); 676 return err; 677 } 678 679 /** 680 * Add current video output CTS to the clip offset 681 * (audio output CTS is not yet at the transition, so audio 682 * offset can't be 
updated yet). */ 683 // Decorrelate input and output encoding timestamp to handle encoder prefetch 684 pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts; 685 686 /** 687 * 2005-03-24: BugFix for audio-video synchro: 688 * Update transition duration due to the actual video transition beginning time. 689 * It will avoid desynchronization when doing the audio transition. */ 690 // Decorrelate input and output encoding timestamp to handle encoder prefetch 691 iTmp = ((M4OSA_Int32)pC->ewc.dInputVidCts)\ 692 - (pC->pC1->iEndTime - TD + pC->pC1->iVoffset); 693 if (iTmp < (M4OSA_Int32)pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration) 694 /**< Test in case of a very short transition */ 695 { 696 pC->pTransitionList[pC-> 697 uiCurrentClip].uiTransitionDuration -= iTmp; 698 699 /** 700 * Don't forget to also correct the total duration used for the progress bar 701 * (it was computed with the original transition duration). */ 702 pC->ewc.iOutputDuration += iTmp; 703 } 704 /**< No "else" here because it's hard predict the effect of 0 duration transition...*/ 705 } 706 707 /** 708 * Check effects for clip2 */ 709 M4VSS3GPP_intCheckVideoEffects(pC, 2); 710 } 711 else 712 { 713 /** 714 * We are not in a transition */ 715 pC->bTransitionEffect = M4OSA_FALSE; 716 717 /* If there is an effect we go to decode/encode mode */ 718 if ((pC->nbActiveEffects > 0) ||(pC->nbActiveEffects1 > 0)) 719 { 720 pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE; 721 } 722 /* We do a begin cut, except if already done (time is not progressing because we want 723 to catch all P-frames after the cut) */ 724 else if( M4OSA_TRUE == pC->bClip1AtBeginCut ) 725 { 726 if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate) 727 || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) ) 728 pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT; 729 else 730 pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT; 731 } 732 /* Else we are in default copy/paste mode */ 733 else 734 { 735 if( ( 
M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate) 736 || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) ) 737 { 738 pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT; 739 } 740 else if( pC->bIsMMS == M4OSA_TRUE ) 741 { 742 M4OSA_UInt32 currentBitrate; 743 M4OSA_ERR err = M4NO_ERROR; 744 745 /* Do we need to reencode the video to downgrade the bitrate or not ? */ 746 /* Let's compute the cirrent bitrate of the current edited clip */ 747 err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption( 748 pC->pC1->pReaderContext, 749 M4READER_kOptionID_Bitrate, ¤tBitrate); 750 751 if( err != M4NO_ERROR ) 752 { 753 M4OSA_TRACE1_1( 754 "M4VSS3GPP_intCheckVideoMode:\ 755 Error when getting next bitrate of edited clip: 0x%x", 756 err); 757 return err; 758 } 759 760 /* Remove audio bitrate */ 761 currentBitrate -= 12200; 762 763 /* Test if we go into copy/paste mode or into decode/encode mode */ 764 if( currentBitrate > pC->uiMMSVideoBitrate ) 765 { 766 pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE; 767 } 768 else 769 { 770 pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE; 771 } 772 } 773 else 774 { 775 pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE; 776 } 777 } 778 } 779 780 /** 781 * Check if we create an encoder */ 782 if( ( ( M4VSS3GPP_kEditVideoState_READ_WRITE == previousVstate) 783 || (M4VSS3GPP_kEditVideoState_AFTER_CUT 784 == previousVstate)) /**< read mode */ 785 && (( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate) 786 || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == pC->Vstate) 787 || (M4VSS3GPP_kEditVideoState_TRANSITION 788 == pC->Vstate)) /**< encode mode */ 789 && pC->bIsMMS == M4OSA_FALSE ) 790 { 791 /** 792 * Create the encoder */ 793 err = M4VSS3GPP_intCreateVideoEncoder(pC); 794 795 if( M4NO_ERROR != err ) 796 { 797 M4OSA_TRACE1_1( 798 "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!", 799 err); 800 return err; 801 } 802 } 803 else if( pC->bIsMMS == M4OSA_TRUE && pC->ewc.pEncContext == M4OSA_NULL ) 804 { 805 /** 
806 * Create the encoder */ 807 err = M4VSS3GPP_intCreateVideoEncoder(pC); 808 809 if( M4NO_ERROR != err ) 810 { 811 M4OSA_TRACE1_1( 812 "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!", 813 err); 814 return err; 815 } 816 } 817 818 /** 819 * When we go from filtering to read/write, we must act like a begin cut, 820 * because the last filtered image may be different than the original image. */ 821 else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate) 822 || (M4VSS3GPP_kEditVideoState_TRANSITION 823 == previousVstate)) /**< encode mode */ 824 && (M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate) /**< read mode */ 825 ) 826 { 827 pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT; 828 } 829 830 /** 831 * Check if we destroy an encoder */ 832 else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate) 833 || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate) 834 || (M4VSS3GPP_kEditVideoState_TRANSITION 835 == previousVstate)) /**< encode mode */ 836 && (( M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate) 837 || (M4VSS3GPP_kEditVideoState_AFTER_CUT 838 == pC->Vstate)) /**< read mode */ 839 && pC->bIsMMS == M4OSA_FALSE ) 840 { 841 /** 842 * Destroy the previously created encoder */ 843 err = M4VSS3GPP_intDestroyVideoEncoder(pC); 844 845 if( M4NO_ERROR != err ) 846 { 847 M4OSA_TRACE1_1( 848 "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intDestroyVideoEncoder returns 0x%x!", 849 err); 850 return err; 851 } 852 } 853 854 /** 855 * Return with no error */ 856 M4OSA_TRACE3_0("M4VSS3GPP_intCheckVideoMode: returning M4NO_ERROR"); 857 return M4NO_ERROR; 858} 859 860/****************************************************************************** 861 * M4OSA_ERR M4VSS3GPP_intStartAU() 862 * @brief StartAU writer-like interface used for the VSS 3GPP only 863 * @note 864 * @param pContext: (IN) It is the VSS 3GPP context in our case 865 * @param streamID: (IN) Id of the stream to which the Access Unit is related. 
866 * @param pAU: (IN/OUT) Access Unit to be prepared. 867 * @return M4NO_ERROR: there is no error 868 ****************************************************************************** 869 */ 870M4OSA_ERR M4VSS3GPP_intStartAU( M4WRITER_Context pContext, 871 M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU ) 872{ 873 M4OSA_ERR err; 874 M4OSA_UInt32 uiMaxAuSize; 875 876 /** 877 * Given context is actually the VSS3GPP context */ 878 M4VSS3GPP_InternalEditContext *pC = 879 (M4VSS3GPP_InternalEditContext *)pContext; 880 881 /** 882 * Get the output AU to write into */ 883 err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext, 884 M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU); 885 886 if( M4NO_ERROR != err ) 887 { 888 M4OSA_TRACE1_1( 889 "M4VSS3GPP_intStartAU: pWriterDataFcts->pStartAU(Video) returns 0x%x!", 890 err); 891 return err; 892 } 893 894 /** 895 * Return */ 896 M4OSA_TRACE3_0("M4VSS3GPP_intStartAU: returning M4NO_ERROR"); 897 return M4NO_ERROR; 898} 899 900/****************************************************************************** 901 * M4OSA_ERR M4VSS3GPP_intProcessAU() 902 * @brief ProcessAU writer-like interface used for the VSS 3GPP only 903 * @note 904 * @param pContext: (IN) It is the VSS 3GPP context in our case 905 * @param streamID: (IN) Id of the stream to which the Access Unit is related. 
906 * @param pAU: (IN/OUT) Access Unit to be written 907 * @return M4NO_ERROR: there is no error 908 ****************************************************************************** 909 */ 910M4OSA_ERR M4VSS3GPP_intProcessAU( M4WRITER_Context pContext, 911 M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU ) 912{ 913 M4OSA_ERR err; 914 915 /** 916 * Given context is actually the VSS3GPP context */ 917 M4VSS3GPP_InternalEditContext *pC = 918 (M4VSS3GPP_InternalEditContext *)pContext; 919 920 /** 921 * Fix the encoded AU time */ 922 // Decorrelate input and output encoding timestamp to handle encoder prefetch 923 pC->ewc.dOutputVidCts = pAU->CTS; 924 /** 925 * Update time info for the Counter Time System to be equal to the bit-stream time */ 926 M4VSS3GPP_intUpdateTimeInfo(pC, pAU); 927 928 /** 929 * Write the AU */ 930 err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext, 931 M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU); 932 933 if( M4NO_ERROR != err ) 934 { 935 M4OSA_TRACE1_1( 936 "M4VSS3GPP_intProcessAU: pWriterDataFcts->pProcessAU(Video) returns 0x%x!", 937 err); 938 return err; 939 } 940 941 /** 942 * Return */ 943 M4OSA_TRACE3_0("M4VSS3GPP_intProcessAU: returning M4NO_ERROR"); 944 return M4NO_ERROR; 945} 946 947/** 948 ****************************************************************************** 949 * M4OSA_ERR M4VSS3GPP_intVPP() 950 * @brief We implement our own VideoPreProcessing function 951 * @note It is called by the video encoder 952 * @param pContext (IN) VPP context, which actually is the VSS 3GPP context in our case 953 * @param pPlaneIn (IN) 954 * @param pPlaneOut (IN/OUT) Pointer to an array of 3 planes that will contain the output 955 * YUV420 image 956 * @return M4NO_ERROR: No error 957 ****************************************************************************** 958 */ 959M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn, 960 M4VIFI_ImagePlane *pPlaneOut ) 961{ 962 M4OSA_ERR err; 963 M4_MediaTime t; 964 
M4VIFI_ImagePlane *pTmp = M4OSA_NULL; 965 M4VIFI_ImagePlane pTemp1[3],pTemp2[3]; 966 M4OSA_UInt32 i =0; 967 /** 968 * VPP context is actually the VSS3GPP context */ 969 M4VSS3GPP_InternalEditContext *pC = 970 (M4VSS3GPP_InternalEditContext *)pContext; 971 pTemp1[0].pac_data = pTemp2[0].pac_data = M4OSA_NULL; 972 /** 973 * Reset VPP error remembered in context */ 974 pC->ewc.VppError = M4NO_ERROR; 975 976 /** 977 * At the end of the editing, we may be called when no more clip is loaded. 978 * (because to close the encoder properly it must be stepped one or twice...) */ 979 if( M4OSA_NULL == pC->pC1 ) 980 { 981 /** 982 * We must fill the input of the encoder with a dummy image, because 983 * encoding noise leads to a huge video AU, and thus a writer buffer overflow. */ 984 M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[0].pac_data, 985 pPlaneOut[0].u_stride * pPlaneOut[0].u_height, 0); 986 M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[1].pac_data, 987 pPlaneOut[1].u_stride * pPlaneOut[1].u_height, 0); 988 M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[2].pac_data, 989 pPlaneOut[2].u_stride * pPlaneOut[2].u_height, 0); 990 991 M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR (abort)"); 992 return M4NO_ERROR; 993 } 994 995 /** 996 **************** Transition case ****************/ 997 if( M4OSA_TRUE == pC->bTransitionEffect ) 998 { 999 if (M4OSA_NULL == pTemp1[0].pac_data) 1000 { 1001 err = M4VSS3GPP_intAllocateYUV420(pTemp1, pC->ewc.uiVideoWidth, 1002 pC->ewc.uiVideoHeight); 1003 if (M4NO_ERROR != err) 1004 { 1005 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(1) returns 0x%x, \ 1006 returning M4NO_ERROR", err); 1007 pC->ewc.VppError = err; 1008 return M4NO_ERROR; /**< Return no error to the encoder core 1009 (else it may leak in some situations...) 
*/ 1010 } 1011 } 1012 if (M4OSA_NULL == pTemp2[0].pac_data) 1013 { 1014 err = M4VSS3GPP_intAllocateYUV420(pTemp2, pC->ewc.uiVideoWidth, 1015 pC->ewc.uiVideoHeight); 1016 if (M4NO_ERROR != err) 1017 { 1018 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(2) returns 0x%x, \ 1019 returning M4NO_ERROR", err); 1020 pC->ewc.VppError = err; 1021 return M4NO_ERROR; /**< Return no error to the encoder core 1022 (else it may leak in some situations...) */ 1023 } 1024 } 1025 /** 1026 * We need two intermediate planes */ 1027 if( M4OSA_NULL == pC->yuv1[0].pac_data ) 1028 { 1029 err = M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth, 1030 pC->ewc.uiVideoHeight); 1031 1032 if( M4NO_ERROR != err ) 1033 { 1034 M4OSA_TRACE1_1( 1035 "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\ 1036 returning M4NO_ERROR", 1037 err); 1038 pC->ewc.VppError = err; 1039 return 1040 M4NO_ERROR; /**< Return no error to the encoder core 1041 (else it may leak in some situations...) */ 1042 } 1043 } 1044 1045 if( M4OSA_NULL == pC->yuv2[0].pac_data ) 1046 { 1047 err = M4VSS3GPP_intAllocateYUV420(pC->yuv2, pC->ewc.uiVideoWidth, 1048 pC->ewc.uiVideoHeight); 1049 1050 if( M4NO_ERROR != err ) 1051 { 1052 M4OSA_TRACE1_1( 1053 "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\ 1054 returning M4NO_ERROR", 1055 err); 1056 pC->ewc.VppError = err; 1057 return 1058 M4NO_ERROR; /**< Return no error to the encoder core 1059 (else it may leak in some situations...) 
*/ 1060 } 1061 } 1062 1063 /** 1064 * Allocate new temporary plane if needed */ 1065 if( M4OSA_NULL == pC->yuv3[0].pac_data ) 1066 { 1067 err = M4VSS3GPP_intAllocateYUV420(pC->yuv3, pC->ewc.uiVideoWidth, 1068 pC->ewc.uiVideoHeight); 1069 1070 if( M4NO_ERROR != err ) 1071 { 1072 M4OSA_TRACE1_1( 1073 "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\ 1074 returning M4NO_ERROR", 1075 err); 1076 pC->ewc.VppError = err; 1077 return 1078 M4NO_ERROR; /**< Return no error to the encoder core 1079 (else it may leak in some situations...) */ 1080 } 1081 } 1082 1083 /** 1084 * Compute the time in the clip1 base: t = to - Offset */ 1085 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1086 t = pC->ewc.dInputVidCts - pC->pC1->iVoffset; 1087 1088 /** 1089 * Render Clip1 */ 1090 if( pC->pC1->isRenderDup == M4OSA_FALSE ) 1091 { 1092 if(pC->nbActiveEffects > 0) 1093 { 1094 err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC1->pViDecCtxt, 1095 &t, pTemp1, 1096 M4OSA_TRUE); 1097 if (M4NO_ERROR != err) 1098 { 1099 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C1) returns 0x%x, \ 1100 returning M4NO_ERROR", err); 1101 pC->ewc.VppError = err; 1102 return M4NO_ERROR; /**< Return no error to the encoder core 1103 (else it may leak in some situations...) */ 1104 } 1105 pC->bIssecondClip = M4OSA_FALSE; 1106 err = M4VSS3GPP_intApplyVideoEffect(pC, pTemp1 ,pC->yuv1 ); 1107 if (M4NO_ERROR != err) 1108 { 1109 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x, \ 1110 returning M4NO_ERROR", err); 1111 pC->ewc.VppError = err; 1112 return M4NO_ERROR; /**< Return no error to the encoder core 1113 (else it may leak in some situations...) 
*/ 1114 } 1115 pC->pC1->lastDecodedPlane = pTemp1; 1116 } 1117 else 1118 { 1119 err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC1->pViDecCtxt, 1120 &t, pC->yuv1, 1121 M4OSA_TRUE); 1122 if (M4NO_ERROR != err) 1123 { 1124 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C1) returns 0x%x, \ 1125 returning M4NO_ERROR", err); 1126 pC->ewc.VppError = err; 1127 return M4NO_ERROR; /**< Return no error to the encoder core 1128 (else it may leak in some situations...) */ 1129 } 1130 pC->pC1->lastDecodedPlane = pC->yuv1; 1131 } 1132 pC->pC1->iVideoRenderCts = (M4OSA_Int32)t; 1133 } 1134 else 1135 { 1136 /* Copy last decoded plane to output plane */ 1137 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data, 1138 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[0].pac_data, 1139 (pTmp[0].u_height * pTmp[0].u_width)); 1140 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data, 1141 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[1].pac_data, 1142 (pTmp[1].u_height * pTmp[1].u_width)); 1143 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data, 1144 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[2].pac_data, 1145 (pTmp[2].u_height * pTmp[2].u_width)); 1146 pC->pC1->lastDecodedPlane = pTmp; 1147 } 1148 1149 /** 1150 * Compute the time in the clip2 base: t = to - Offset */ 1151 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1152 t = pC->ewc.dInputVidCts - pC->pC2->iVoffset; 1153 /** 1154 * Render Clip2 */ 1155 if( pC->pC2->isRenderDup == M4OSA_FALSE ) 1156 { 1157 if(pC->nbActiveEffects1 > 0) 1158 { 1159 err = pC->pC2->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC2->pViDecCtxt, 1160 &t, pTemp2, 1161 M4OSA_TRUE); 1162 if (M4NO_ERROR != err) 1163 { 1164 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C2) returns 0x%x, \ 1165 returning M4NO_ERROR", err); 1166 pC->ewc.VppError = err; 1167 return M4NO_ERROR; /**< Return no error to the encoder core 1168 (else it may leak in some situations...) 
*/ 1169 } 1170 1171 pC->bIssecondClip = M4OSA_TRUE; 1172 err = M4VSS3GPP_intApplyVideoEffect(pC, pTemp2 ,pC->yuv2); 1173 if (M4NO_ERROR != err) 1174 { 1175 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x, \ 1176 returning M4NO_ERROR", err); 1177 pC->ewc.VppError = err; 1178 return M4NO_ERROR; /**< Return no error to the encoder core 1179 (else it may leak in some situations...) */ 1180 } 1181 pC->pC2->lastDecodedPlane = pTemp2; 1182 } 1183 else 1184 { 1185 err = pC->pC2->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC2->pViDecCtxt, 1186 &t, pC->yuv2, 1187 M4OSA_TRUE); 1188 if (M4NO_ERROR != err) 1189 { 1190 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C2) returns 0x%x, \ 1191 returning M4NO_ERROR", err); 1192 pC->ewc.VppError = err; 1193 return M4NO_ERROR; /**< Return no error to the encoder core 1194 (else it may leak in some situations...) */ 1195 } 1196 pC->pC2->lastDecodedPlane = pC->yuv2; 1197 } 1198 pC->pC2->iVideoRenderCts = (M4OSA_Int32)t; 1199 } 1200 else 1201 { 1202 /* Copy last decoded plane to output plane */ 1203 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data, 1204 (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[0].pac_data, 1205 (pTmp[0].u_height * pTmp[0].u_width)); 1206 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data, 1207 (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[1].pac_data, 1208 (pTmp[1].u_height * pTmp[1].u_width)); 1209 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data, 1210 (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[2].pac_data, 1211 (pTmp[2].u_height * pTmp[2].u_width)); 1212 pC->pC2->lastDecodedPlane = pTmp; 1213 } 1214 1215 1216 pTmp = pPlaneOut; 1217 err = M4VSS3GPP_intVideoTransition(pC, pTmp); 1218 1219 if( M4NO_ERROR != err ) 1220 { 1221 M4OSA_TRACE1_1( 1222 "M4VSS3GPP_intVPP: M4VSS3GPP_intVideoTransition returns 0x%x,\ 1223 returning M4NO_ERROR", 1224 err); 1225 pC->ewc.VppError = err; 1226 return M4NO_ERROR; /**< Return no error to the encoder core 1227 (else it may leak in some situations...) 
*/ 1228 } 1229 for (i=0; i < 3; i++) 1230 { 1231 if (pTemp2[i].pac_data != M4OSA_NULL) 1232 { 1233 M4OSA_free((M4OSA_MemAddr32)pTemp2[i].pac_data); 1234 pTemp2[i].pac_data = M4OSA_NULL; 1235 } 1236 1237 1238 if (pTemp1[i].pac_data != M4OSA_NULL) 1239 { 1240 M4OSA_free((M4OSA_MemAddr32)pTemp1[i].pac_data); 1241 pTemp1[i].pac_data = M4OSA_NULL; 1242 } 1243 } 1244 } 1245 /** 1246 **************** No Transition case ****************/ 1247 else 1248 { 1249 /** 1250 * Check if there is a filter */ 1251 if( pC->nbActiveEffects > 0 ) 1252 { 1253 /** 1254 * If we do modify the image, we need an intermediate image plane */ 1255 if( M4OSA_NULL == pC->yuv1[0].pac_data ) 1256 { 1257 err = 1258 M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth, 1259 pC->ewc.uiVideoHeight); 1260 1261 if( M4NO_ERROR != err ) 1262 { 1263 M4OSA_TRACE1_1( 1264 "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420 returns 0x%x,\ 1265 returning M4NO_ERROR", 1266 err); 1267 pC->ewc.VppError = err; 1268 return 1269 M4NO_ERROR; /**< Return no error to the encoder core 1270 (else it may leak in some situations...) 
*/ 1271 } 1272 } 1273 /** 1274 * The image is rendered in the intermediate image plane */ 1275 pTmp = pC->yuv1; 1276 } 1277 else 1278 { 1279 /** 1280 * No filter, the image is directly rendered in pPlaneOut */ 1281 pTmp = pPlaneOut; 1282 } 1283 1284 /** 1285 * Compute the time in the clip base: t = to - Offset */ 1286 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1287 t = pC->ewc.dInputVidCts - pC->pC1->iVoffset; 1288 1289 if( pC->pC1->isRenderDup == M4OSA_FALSE ) 1290 { 1291 err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender( 1292 pC->pC1->pViDecCtxt, &t, pTmp, M4OSA_TRUE); 1293 1294 if( M4NO_ERROR != err ) 1295 { 1296 M4OSA_TRACE1_1( 1297 "M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender returns 0x%x,\ 1298 returning M4NO_ERROR", 1299 err); 1300 pC->ewc.VppError = err; 1301 return 1302 M4NO_ERROR; /**< Return no error to the encoder core 1303 (else it may leak in some situations...) */ 1304 } 1305 pC->pC1->lastDecodedPlane = pTmp; 1306 pC->pC1->iVideoRenderCts = (M4OSA_Int32)t; 1307 } 1308 else 1309 { 1310 /* Copy last decoded plane to output plane */ 1311 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data, 1312 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[0].pac_data, 1313 (pTmp[0].u_height * pTmp[0].u_width)); 1314 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data, 1315 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[1].pac_data, 1316 (pTmp[1].u_height * pTmp[1].u_width)); 1317 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data, 1318 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[2].pac_data, 1319 (pTmp[2].u_height * pTmp[2].u_width)); 1320 pC->pC1->lastDecodedPlane = pTmp; 1321 } 1322 1323 M4OSA_TRACE3_1("M4VSS3GPP_intVPP: Rendered at CTS %.3f", t); 1324 1325 /** 1326 * Apply the clip1 effect */ 1327 // if (pC->iClip1ActiveEffect >= 0) 1328 if( pC->nbActiveEffects > 0 ) 1329 { 1330 err = M4VSS3GPP_intApplyVideoEffect(pC,/*1,*/ pC->yuv1, pPlaneOut); 1331 1332 if( M4NO_ERROR != err ) 1333 { 1334 M4OSA_TRACE1_1( 1335 "M4VSS3GPP_intVPP: 
M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x,\ 1336 returning M4NO_ERROR", 1337 err); 1338 pC->ewc.VppError = err; 1339 return 1340 M4NO_ERROR; /**< Return no error to the encoder core 1341 (else it may leak in some situations...) */ 1342 } 1343 } 1344 } 1345 1346 /** 1347 * Return */ 1348 M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR"); 1349 return M4NO_ERROR; 1350} 1351 1352/** 1353 ****************************************************************************** 1354 * M4OSA_ERR M4VSS3GPP_intApplyVideoEffect() 1355 * @brief Apply video effect from pPlaneIn to pPlaneOut 1356 * @param pC (IN/OUT) Internal edit context 1357 * @param uiClip1orClip2 (IN/OUT) 1 for first clip, 2 for second clip 1358 * @param pInputPlanes (IN) Input raw YUV420 image 1359 * @param pOutputPlanes (IN/OUT) Output raw YUV420 image 1360 * @return M4NO_ERROR: No error 1361 ****************************************************************************** 1362 */ 1363static M4OSA_ERR 1364M4VSS3GPP_intApplyVideoEffect( M4VSS3GPP_InternalEditContext *pC, 1365 M4VIFI_ImagePlane *pPlaneIn, 1366 M4VIFI_ImagePlane *pPlaneOut ) 1367{ 1368 M4OSA_ERR err; 1369 1370 M4VSS3GPP_ClipContext *pClip; 1371 M4VSS3GPP_EffectSettings *pFx; 1372 M4VFL_CurtainParam curtainParams; 1373 M4VSS3GPP_ExternalProgress extProgress; 1374 1375 M4OSA_Double VideoEffectTime; 1376 M4OSA_Double PercentageDone; 1377 M4OSA_Int32 tmp; 1378 1379 M4VIFI_ImagePlane *pPlaneTempIn; 1380 M4VIFI_ImagePlane *pPlaneTempOut; 1381 M4OSA_UInt8 i; 1382 M4OSA_UInt8 NumActiveEffects =0; 1383 1384 1385 pClip = pC->pC1; 1386 if (pC->bIssecondClip == M4OSA_TRUE) 1387 { 1388 NumActiveEffects = pC->nbActiveEffects1; 1389 } 1390 else 1391 { 1392 NumActiveEffects = pC->nbActiveEffects; 1393 } 1394 1395 /** 1396 * Allocate temporary plane if needed RC */ 1397 if (M4OSA_NULL == pC->yuv4[0].pac_data && NumActiveEffects > 1) 1398 { 1399 err = M4VSS3GPP_intAllocateYUV420(pC->yuv4, pC->ewc.uiVideoWidth, 1400 pC->ewc.uiVideoHeight); 1401 1402 if( 
M4NO_ERROR != err ) 1403 { 1404 M4OSA_TRACE1_1( 1405 "M4VSS3GPP_intApplyVideoEffect: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\ 1406 returning M4NO_ERROR", 1407 err); 1408 pC->ewc.VppError = err; 1409 return 1410 M4NO_ERROR; /**< Return no error to the encoder core 1411 (else it may leak in some situations...) */ 1412 } 1413 } 1414 1415 if (NumActiveEffects % 2 == 0) 1416 { 1417 pPlaneTempIn = pPlaneIn; 1418 pPlaneTempOut = pC->yuv4; 1419 } 1420 else 1421 { 1422 pPlaneTempIn = pPlaneIn; 1423 pPlaneTempOut = pPlaneOut; 1424 } 1425 1426 for (i=0; i<NumActiveEffects; i++) 1427 { 1428 if (pC->bIssecondClip == M4OSA_TRUE) 1429 { 1430 1431 1432 pFx = &(pC->pEffectsList[pC->pActiveEffectsList1[i]]); 1433 /* Compute how far from the beginning of the effect we are, in clip-base time. */ 1434 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1435 VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) + 1436 pC->pTransitionList[pC->uiCurrentClip]. 1437 uiTransitionDuration- pFx->uiStartTime; 1438 } 1439 else 1440 { 1441 pFx = &(pC->pEffectsList[pC->pActiveEffectsList[i]]); 1442 /* Compute how far from the beginning of the effect we are, in clip-base time. */ 1443 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1444 VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pFx->uiStartTime; 1445 } 1446 1447 1448 1449 /* To calculate %, substract timeIncrement because effect should finish on the last frame*/ 1450 /* which is presented from CTS = eof-timeIncrement till CTS = eof */ 1451 PercentageDone = VideoEffectTime 1452 / ((M4OSA_Float)pFx->uiDuration/*- pC->dOutputFrameDuration*/); 1453 1454 if( PercentageDone < 0.0 ) 1455 PercentageDone = 0.0; 1456 1457 if( PercentageDone > 1.0 ) 1458 PercentageDone = 1.0; 1459 1460 switch( pFx->VideoEffectType ) 1461 { 1462 case M4VSS3GPP_kVideoEffectType_FadeFromBlack: 1463 /** 1464 * Compute where we are in the effect (scale is 0->1024). 
*/ 1465 tmp = (M4OSA_Int32)(PercentageDone * 1024); 1466 1467 /** 1468 * Apply the darkening effect */ 1469 err = 1470 M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn, 1471 (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL); 1472 1473 if( M4NO_ERROR != err ) 1474 { 1475 M4OSA_TRACE1_1( 1476 "M4VSS3GPP_intApplyVideoEffect:\ 1477 M4VFL_modifyLumaWithScale returns error 0x%x,\ 1478 returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", 1479 err); 1480 return M4VSS3GPP_ERR_LUMA_FILTER_ERROR; 1481 } 1482 break; 1483 1484 case M4VSS3GPP_kVideoEffectType_CurtainOpening: 1485 /** 1486 * Compute where we are in the effect (scale is 0->height). 1487 * It is done with floats because tmp x height can be very large 1488 (with long clips).*/ 1489 curtainParams.nb_black_lines = 1490 (M4OSA_UInt16)(( 1.0 - PercentageDone) 1491 * pPlaneTempIn[0].u_height); 1492 /** 1493 * The curtain is hanged on the ceiling */ 1494 curtainParams.top_is_black = 1; 1495 1496 /** 1497 * Apply the curtain effect */ 1498 err = M4VFL_applyCurtain((M4ViComImagePlane *)pPlaneTempIn, 1499 (M4ViComImagePlane *)pPlaneTempOut, &curtainParams, 1500 M4OSA_NULL); 1501 1502 if( M4NO_ERROR != err ) 1503 { 1504 M4OSA_TRACE1_1( 1505 "M4VSS3GPP_intApplyVideoEffect: M4VFL_applyCurtain returns error 0x%x,\ 1506 returning M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR", 1507 err); 1508 return M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR; 1509 } 1510 break; 1511 1512 case M4VSS3GPP_kVideoEffectType_FadeToBlack: 1513 /** 1514 * Compute where we are in the effect (scale is 0->1024) */ 1515 tmp = (M4OSA_Int32)(( 1.0 - PercentageDone) * 1024); 1516 1517 /** 1518 * Apply the darkening effect */ 1519 err = 1520 M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn, 1521 (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL); 1522 1523 if( M4NO_ERROR != err ) 1524 { 1525 M4OSA_TRACE1_1( 1526 "M4VSS3GPP_intApplyVideoEffect:\ 1527 M4VFL_modifyLumaWithScale returns error 0x%x,\ 1528 returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", 1529 err); 1530 
return M4VSS3GPP_ERR_LUMA_FILTER_ERROR; 1531 } 1532 break; 1533 1534 case M4VSS3GPP_kVideoEffectType_CurtainClosing: 1535 /** 1536 * Compute where we are in the effect (scale is 0->height) */ 1537 curtainParams.nb_black_lines = 1538 (M4OSA_UInt16)(PercentageDone * pPlaneTempIn[0].u_height); 1539 1540 /** 1541 * The curtain is hanged on the ceiling */ 1542 curtainParams.top_is_black = 1; 1543 1544 /** 1545 * Apply the curtain effect */ 1546 err = M4VFL_applyCurtain((M4ViComImagePlane *)pPlaneTempIn, 1547 (M4ViComImagePlane *)pPlaneTempOut, &curtainParams, 1548 M4OSA_NULL); 1549 1550 if( M4NO_ERROR != err ) 1551 { 1552 M4OSA_TRACE1_1( 1553 "M4VSS3GPP_intApplyVideoEffect: M4VFL_applyCurtain returns error 0x%x,\ 1554 returning M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR", 1555 err); 1556 return M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR; 1557 } 1558 break; 1559 1560 default: 1561 if( pFx->VideoEffectType 1562 >= M4VSS3GPP_kVideoEffectType_External ) 1563 { 1564 M4OSA_UInt32 Cts = 0; 1565 M4OSA_Int32 nextEffectTime; 1566 1567 /** 1568 * Compute where we are in the effect (scale is 0->1000) */ 1569 tmp = (M4OSA_Int32)(PercentageDone * 1000); 1570 1571 /** 1572 * Set the progress info provided to the external function */ 1573 extProgress.uiProgress = (M4OSA_UInt32)tmp; 1574 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1575 extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts; 1576 extProgress.uiClipTime = extProgress.uiOutputTime - pClip->iVoffset; 1577 extProgress.bIsLast = M4OSA_FALSE; 1578 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1579 nextEffectTime = (M4OSA_Int32)(pC->ewc.dInputVidCts \ 1580 + pC->dOutputFrameDuration); 1581 if(nextEffectTime >= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) 1582 { 1583 extProgress.bIsLast = M4OSA_TRUE; 1584 } 1585 1586 err = pFx->ExtVideoEffectFct(pFx->pExtVideoEffectFctCtxt, 1587 pPlaneTempIn, pPlaneTempOut, &extProgress, 1588 pFx->VideoEffectType 1589 - 
M4VSS3GPP_kVideoEffectType_External); 1590 1591 if( M4NO_ERROR != err ) 1592 { 1593 M4OSA_TRACE1_1( 1594 "M4VSS3GPP_intApplyVideoEffect: \ 1595 External video effect function returns 0x%x!", 1596 err); 1597 return err; 1598 } 1599 break; 1600 } 1601 else 1602 { 1603 M4OSA_TRACE1_1( 1604 "M4VSS3GPP_intApplyVideoEffect: unknown effect type (0x%x),\ 1605 returning M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE", 1606 pFx->VideoEffectType); 1607 return M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE; 1608 } 1609 } 1610 /** 1611 * RC Updates pTempPlaneIn and pTempPlaneOut depending on current effect */ 1612 if (((i % 2 == 0) && (NumActiveEffects % 2 == 0)) 1613 || ((i % 2 != 0) && (NumActiveEffects % 2 != 0))) 1614 { 1615 pPlaneTempIn = pC->yuv4; 1616 pPlaneTempOut = pPlaneOut; 1617 } 1618 else 1619 { 1620 pPlaneTempIn = pPlaneOut; 1621 pPlaneTempOut = pC->yuv4; 1622 } 1623 } 1624 1625 /** 1626 * Return */ 1627 M4OSA_TRACE3_0("M4VSS3GPP_intApplyVideoEffect: returning M4NO_ERROR"); 1628 return M4NO_ERROR; 1629} 1630 1631/** 1632 ****************************************************************************** 1633 * M4OSA_ERR M4VSS3GPP_intVideoTransition() 1634 * @brief Apply video transition effect pC1+pC2->pPlaneOut 1635 * @param pC (IN/OUT) Internal edit context 1636 * @param pOutputPlanes (IN/OUT) Output raw YUV420 image 1637 * @return M4NO_ERROR: No error 1638 ****************************************************************************** 1639 */ 1640static M4OSA_ERR 1641M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC, 1642 M4VIFI_ImagePlane *pPlaneOut ) 1643{ 1644 M4OSA_ERR err; 1645 M4OSA_Int32 iProgress; 1646 M4VSS3GPP_ExternalProgress extProgress; 1647 M4VIFI_ImagePlane *pPlane; 1648 M4OSA_Int32 i; 1649 const M4OSA_Int32 iDur = (M4OSA_Int32)pC-> 1650 pTransitionList[pC->uiCurrentClip].uiTransitionDuration; 1651 1652 /** 1653 * Compute how far from the end cut we are, in clip-base time. 
1654 * It is done with integers because the offset and begin cut have been rounded already. */ 1655 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1656 iProgress = (M4OSA_Int32)((M4OSA_Double)pC->pC1->iEndTime) - pC->ewc.dInputVidCts + 1657 ((M4OSA_Double)pC->pC1->iVoffset); 1658 /** 1659 * We must remove the duration of one frame, else we would almost never reach the end 1660 * (It's kind of a "pile and intervals" issue). */ 1661 iProgress -= (M4OSA_Int32)pC->dOutputFrameDuration; 1662 1663 if( iProgress < 0 ) /**< Sanity checks */ 1664 { 1665 iProgress = 0; 1666 } 1667 1668 /** 1669 * Compute where we are in the transition, on a base 1000 */ 1670 iProgress = ( ( iDur - iProgress) * 1000) / iDur; 1671 1672 /** 1673 * Sanity checks */ 1674 if( iProgress < 0 ) 1675 { 1676 iProgress = 0; 1677 } 1678 else if( iProgress > 1000 ) 1679 { 1680 iProgress = 1000; 1681 } 1682 1683 switch( pC->pTransitionList[pC->uiCurrentClip].TransitionBehaviour ) 1684 { 1685 case M4VSS3GPP_TransitionBehaviour_SpeedUp: 1686 iProgress = ( iProgress * iProgress) / 1000; 1687 break; 1688 1689 case M4VSS3GPP_TransitionBehaviour_Linear: 1690 /*do nothing*/ 1691 break; 1692 1693 case M4VSS3GPP_TransitionBehaviour_SpeedDown: 1694 iProgress = (M4OSA_Int32)(sqrt(iProgress * 1000)); 1695 break; 1696 1697 case M4VSS3GPP_TransitionBehaviour_SlowMiddle: 1698 if( iProgress < 500 ) 1699 { 1700 iProgress = (M4OSA_Int32)(sqrt(iProgress * 500)); 1701 } 1702 else 1703 { 1704 iProgress = 1705 (M4OSA_Int32)(( ( ( iProgress - 500) * (iProgress - 500)) 1706 / 500) + 500); 1707 } 1708 break; 1709 1710 case M4VSS3GPP_TransitionBehaviour_FastMiddle: 1711 if( iProgress < 500 ) 1712 { 1713 iProgress = (M4OSA_Int32)(( iProgress * iProgress) / 500); 1714 } 1715 else 1716 { 1717 iProgress = (M4OSA_Int32)(sqrt(( iProgress - 500) * 500) + 500); 1718 } 1719 break; 1720 1721 default: 1722 /*do nothing*/ 1723 break; 1724 } 1725 1726 switch( 
pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType ) 1727 { 1728 case M4VSS3GPP_kVideoTransitionType_CrossFade: 1729 /** 1730 * Apply the transition effect */ 1731 err = M4VIFI_ImageBlendingonYUV420(M4OSA_NULL, 1732 (M4ViComImagePlane *)pC->yuv1, 1733 (M4ViComImagePlane *)pC->yuv2, 1734 (M4ViComImagePlane *)pPlaneOut, iProgress); 1735 1736 if( M4NO_ERROR != err ) 1737 { 1738 M4OSA_TRACE1_1( 1739 "M4VSS3GPP_intVideoTransition:\ 1740 M4VIFI_ImageBlendingonYUV420 returns error 0x%x,\ 1741 returning M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR", 1742 err); 1743 return M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR; 1744 } 1745 break; 1746 1747 case M4VSS3GPP_kVideoTransitionType_None: 1748 /** 1749 * This is a stupid-non optimized version of the None transition... 1750 * We copy the YUV frame */ 1751 if( iProgress < 500 ) /**< first half of transition */ 1752 { 1753 pPlane = pC->yuv1; 1754 } 1755 else /**< second half of transition */ 1756 { 1757 pPlane = pC->yuv2; 1758 } 1759 /** 1760 * Copy the input YUV frames */ 1761 i = 3; 1762 1763 while( i-- > 0 ) 1764 { 1765 M4OSA_memcpy((M4OSA_MemAddr8)pPlaneOut[i].pac_data, 1766 (M4OSA_MemAddr8)pPlane[i].pac_data, 1767 pPlaneOut[i].u_stride * pPlaneOut[i].u_height); 1768 } 1769 break; 1770 1771 default: 1772 if( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType 1773 >= M4VSS3GPP_kVideoTransitionType_External ) 1774 { 1775 /** 1776 * Set the progress info provided to the external function */ 1777 extProgress.uiProgress = (M4OSA_UInt32)iProgress; 1778 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1779 extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts; 1780 extProgress.uiClipTime = extProgress.uiOutputTime - pC->pC1->iVoffset; 1781 1782 err = pC->pTransitionList[pC-> 1783 uiCurrentClip].ExtVideoTransitionFct( 1784 pC->pTransitionList[pC-> 1785 uiCurrentClip].pExtVideoTransitionFctCtxt, 1786 pC->yuv1, pC->yuv2, pPlaneOut, &extProgress, 1787 pC->pTransitionList[pC-> 1788 
uiCurrentClip].VideoTransitionType 1789 - M4VSS3GPP_kVideoTransitionType_External); 1790 1791 if( M4NO_ERROR != err ) 1792 { 1793 M4OSA_TRACE1_1( 1794 "M4VSS3GPP_intVideoTransition:\ 1795 External video transition function returns 0x%x!", 1796 err); 1797 return err; 1798 } 1799 break; 1800 } 1801 else 1802 { 1803 M4OSA_TRACE1_1( 1804 "M4VSS3GPP_intVideoTransition: unknown transition type (0x%x),\ 1805 returning M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE", 1806 pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType); 1807 return M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE; 1808 } 1809 } 1810 1811 /** 1812 * Return */ 1813 M4OSA_TRACE3_0("M4VSS3GPP_intVideoTransition: returning M4NO_ERROR"); 1814 return M4NO_ERROR; 1815} 1816 1817/** 1818 ****************************************************************************** 1819 * M4OSA_Void M4VSS3GPP_intUpdateTimeInfo() 1820 * @brief Update bit stream time info by Counter Time System to be compliant with 1821 * players using bit stream time info 1822 * @note H263 uses an absolute time counter unlike MPEG4 which uses Group Of Vops 1823 * (GOV, see the standard) 1824 * @param pC (IN/OUT) returns time updated video AU, 1825 * the offset between system and video time (MPEG4 only) 1826 * and the state of the current clip (MPEG4 only) 1827 * @return nothing 1828 ****************************************************************************** 1829 */ 1830static M4OSA_Void 1831M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC, 1832 M4SYS_AccessUnit *pAU ) 1833{ 1834 M4OSA_UInt8 uiTmp; 1835 M4OSA_UInt32 uiCts = 0; 1836 M4OSA_MemAddr8 pTmp; 1837 M4OSA_UInt32 uiAdd; 1838 M4OSA_UInt32 uiCurrGov; 1839 M4OSA_Int8 iDiff; 1840 1841 M4VSS3GPP_ClipContext *pClipCtxt = pC->pC1; 1842 M4OSA_Int32 *pOffset = &(pC->ewc.iMpeg4GovOffset); 1843 1844 /** 1845 * Set H263 time counter from system time */ 1846 if( M4SYS_kH263 == pAU->stream->streamType ) 1847 { 1848 uiTmp = (M4OSA_UInt8)((M4OSA_UInt32)( ( pAU->CTS * 30) / 1001 + 0.5) 
1849 % M4VSS3GPP_EDIT_H263_MODULO_TIME); 1850 M4VSS3GPP_intSetH263TimeCounter((M4OSA_MemAddr8)(pAU->dataAddress), 1851 uiTmp); 1852 } 1853 /* 1854 * Set MPEG4 GOV time counter regarding video and system time */ 1855 else if( M4SYS_kMPEG_4 == pAU->stream->streamType ) 1856 { 1857 /* 1858 * If GOV. 1859 * beware of little/big endian! */ 1860 /* correction: read 8 bits block instead of one 32 bits block */ 1861 M4OSA_UInt8 *temp8 = (M4OSA_UInt8 *)(pAU->dataAddress); 1862 M4OSA_UInt32 temp32 = 0; 1863 1864 temp32 = ( 0x000000ff & (M4OSA_UInt32)(*temp8)) 1865 + (0x0000ff00 & ((M4OSA_UInt32)(*(temp8 + 1))) << 8) 1866 + (0x00ff0000 & ((M4OSA_UInt32)(*(temp8 + 2))) << 16) 1867 + (0xff000000 & ((M4OSA_UInt32)(*(temp8 + 3))) << 24); 1868 1869 M4OSA_TRACE3_2("RC: Temp32: 0x%x, dataAddress: 0x%x\n", temp32, 1870 *(pAU->dataAddress)); 1871 1872 if( M4VSS3GPP_EDIT_GOV_HEADER == temp32 ) 1873 { 1874 pTmp = 1875 (M4OSA_MemAddr8)(pAU->dataAddress 1876 + 1); /**< Jump to the time code (just after the 32 bits header) */ 1877 uiAdd = (M4OSA_UInt32)(pAU->CTS)+( *pOffset); 1878 1879 switch( pClipCtxt->bMpeg4GovState ) 1880 { 1881 case M4OSA_FALSE: /*< INIT */ 1882 { 1883 /* video time = ceil (system time + offset) */ 1884 uiCts = ( uiAdd + 999) / 1000; 1885 1886 /* offset update */ 1887 ( *pOffset) += (( uiCts * 1000) - uiAdd); 1888 1889 /* Save values */ 1890 pClipCtxt->uiMpeg4PrevGovValueSet = uiCts; 1891 1892 /* State to 'first' */ 1893 pClipCtxt->bMpeg4GovState = M4OSA_TRUE; 1894 } 1895 break; 1896 1897 case M4OSA_TRUE: /*< UPDATE */ 1898 { 1899 /* Get current Gov value */ 1900 M4VSS3GPP_intGetMPEG4Gov(pTmp, &uiCurrGov); 1901 1902 /* video time = floor or ceil (system time + offset) */ 1903 uiCts = (uiAdd / 1000); 1904 iDiff = (M4OSA_Int8)(uiCurrGov 1905 - pClipCtxt->uiMpeg4PrevGovValueGet - uiCts 1906 + pClipCtxt->uiMpeg4PrevGovValueSet); 1907 1908 /* ceiling */ 1909 if( iDiff > 0 ) 1910 { 1911 uiCts += (M4OSA_UInt32)(iDiff); 1912 1913 /* offset update */ 1914 ( *pOffset) += (( 
uiCts * 1000) - uiAdd); 1915 } 1916 1917 /* Save values */ 1918 pClipCtxt->uiMpeg4PrevGovValueGet = uiCurrGov; 1919 pClipCtxt->uiMpeg4PrevGovValueSet = uiCts; 1920 } 1921 break; 1922 } 1923 1924 M4VSS3GPP_intSetMPEG4Gov(pTmp, uiCts); 1925 } 1926 } 1927 return; 1928} 1929 1930/** 1931 ****************************************************************************** 1932 * M4OSA_Void M4VSS3GPP_intCheckVideoEffects() 1933 * @brief Check which video effect must be applied at the current time 1934 ****************************************************************************** 1935 */ 1936static M4OSA_Void 1937M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC, 1938 M4OSA_UInt8 uiClipNumber ) 1939{ 1940 M4OSA_UInt8 uiClipIndex; 1941 M4OSA_UInt8 uiFxIndex, i; 1942 M4VSS3GPP_ClipContext *pClip; 1943 M4VSS3GPP_EffectSettings *pFx; 1944 M4OSA_Int32 Off, BC, EC; 1945 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1946 M4OSA_Int32 t = (M4OSA_Int32)pC->ewc.dInputVidCts; 1947 1948 uiClipIndex = pC->uiCurrentClip; 1949 pClip = pC->pC1; 1950 /** 1951 * Shortcuts for code readability */ 1952 Off = pClip->iVoffset; 1953 BC = pClip->iActualVideoBeginCut; 1954 EC = pClip->iEndTime; 1955 1956 i = 0; 1957 1958 for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ ) 1959 { 1960 /** Shortcut, reverse order because of priority between effects(EndEffect always clean )*/ 1961 pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]); 1962 1963 if( M4VSS3GPP_kVideoEffectType_None != pFx->VideoEffectType ) 1964 { 1965 /** 1966 * Check if there is actually a video effect */ 1967 1968 if(uiClipNumber ==1) 1969 { 1970 if ((t >= (M4OSA_Int32)(pFx->uiStartTime)) && /**< Are we after the start time of the effect? */ 1971 (t < (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) /**< Are we into the effect duration? 
*/ 1972 { 1973 /** 1974 * Set the active effect(s) */ 1975 pC->pActiveEffectsList[i] = pC->nbEffects-1-uiFxIndex; 1976 1977 /** 1978 * Update counter of active effects */ 1979 i++; 1980 1981 /** 1982 * The third effect has the highest priority, then the second one, then the first one. 1983 * Hence, as soon as we found an active effect, we can get out of this loop */ 1984 1985 } 1986 } 1987 else 1988 { 1989 if ((t + pC->pTransitionList[uiClipIndex].uiTransitionDuration >= 1990 (M4OSA_Int32)(pFx->uiStartTime)) && (t + pC->pTransitionList[uiClipIndex].uiTransitionDuration 1991 < (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) /**< Are we into the effect duration? */ 1992 { 1993 /** 1994 * Set the active effect(s) */ 1995 pC->pActiveEffectsList1[i] = pC->nbEffects-1-uiFxIndex; 1996 1997 /** 1998 * Update counter of active effects */ 1999 i++; 2000 2001 /** 2002 * The third effect has the highest priority, then the second one, then the first one. 2003 * Hence, as soon as we found an active effect, we can get out of this loop */ 2004 } 2005 2006 2007 } 2008 2009 } 2010 } 2011 2012 if(1==uiClipNumber) 2013 { 2014 /** 2015 * Save number of active effects */ 2016 pC->nbActiveEffects = i; 2017 } 2018 else 2019 { 2020 pC->nbActiveEffects1 = i; 2021 } 2022 2023 /** 2024 * Change the absolut time to clip related time */ 2025 t -= Off; 2026 2027 /** 2028 * Check if we are on the begin cut (for clip1 only) */ 2029 if( ( 0 != BC) && (t == BC) && (1 == uiClipNumber) ) 2030 { 2031 pC->bClip1AtBeginCut = M4OSA_TRUE; 2032 } 2033 else 2034 { 2035 pC->bClip1AtBeginCut = M4OSA_FALSE; 2036 } 2037 2038 return; 2039} 2040 2041/** 2042 ****************************************************************************** 2043 * M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder() 2044 * @brief Creates the video encoder 2045 * @note 2046 ****************************************************************************** 2047 */ 2048M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder( M4VSS3GPP_InternalEditContext *pC ) 
{
    M4OSA_ERR err;
    M4ENCODER_AdvancedParams EncParams;

    /**
    * Simulate a writer interface with our specific function */
    pC->ewc.OurWriterDataInterface.pProcessAU =
        M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific,
                                but it follow the writer interface */
    pC->ewc.OurWriterDataInterface.pStartAU =
        M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific,
                              but it follow the writer interface */
    pC->ewc.OurWriterDataInterface.pWriterContext =
        (M4WRITER_Context)
        pC; /**< We give the internal context as writer context */

    /**
    * Get the encoder interface, if not already done */
    if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts )
    {
        err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI,
            pC->ewc.VideoStreamType);
        M4OSA_TRACE1_1(
            "M4VSS3GPP_intCreateVideoEncoder: setCurrentEncoder returns 0x%x",
            err);
        M4ERR_CHECK_RETURN(err);
    }

    /**
    * Set encoder shell parameters according to VSS settings */

    /* Common parameters */
    EncParams.InputFormat = M4ENCODER_kIYUV420;
    EncParams.FrameWidth = pC->ewc.uiVideoWidth;
    EncParams.FrameHeight = pC->ewc.uiVideoHeight;
    EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale;

    if( pC->bIsMMS == M4OSA_FALSE )
    {
        /* No strict regulation in video editor */
        /* Because of the effects and transitions we should allow more flexibility */
        /* Also it prevents to drop important frames (with a bad result on sheduling and
        block effetcs) */
        EncParams.bInternalRegulation = M4OSA_FALSE;
        // Variable framerate is not supported by StageFright encoders
        EncParams.FrameRate = M4ENCODER_k30_FPS;
    }
    else
    {
        /* In case of MMS mode, we need to enable bitrate regulation to be sure */
        /* to reach the targeted output file size */
        EncParams.bInternalRegulation = M4OSA_TRUE;
        EncParams.FrameRate = pC->MMSvideoFramerate;
    }

    /**
    * Other encoder settings (defaults); the switch below overrides
    * some of these per output stream type */
    EncParams.uiHorizontalSearchRange = 0;     /* use default */
    EncParams.uiVerticalSearchRange = 0;       /* use default */
    EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
    EncParams.uiIVopPeriod = 0;                /* use default */
    EncParams.uiMotionEstimationTools = 0;     /* M4V_MOTION_EST_TOOLS_ALL */
    EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
    EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
    EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */

    switch ( pC->ewc.VideoStreamType )
    {
        case M4SYS_kH263:

            EncParams.Format = M4ENCODER_kH263;

            EncParams.uiStartingQuantizerValue = 10;
            EncParams.uiRateFactor = 1; /* default */

            EncParams.bErrorResilience = M4OSA_FALSE;
            EncParams.bDataPartitioning = M4OSA_FALSE;
            break;

        case M4SYS_kMPEG_4:

            EncParams.Format = M4ENCODER_kMPEG4;

            EncParams.uiStartingQuantizerValue = 8;
            /* Rate factor derived from the output frame duration expressed
               in timescale units, rounded to nearest */
            EncParams.uiRateFactor = (M4OSA_UInt8)(( pC->dOutputFrameDuration
                * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5);

            if( EncParams.uiRateFactor == 0 )
                EncParams.uiRateFactor = 1; /* default */

            if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning )
            {
                EncParams.bErrorResilience = M4OSA_FALSE;
                EncParams.bDataPartitioning = M4OSA_FALSE;
            }
            else
            {
                EncParams.bErrorResilience = M4OSA_TRUE;
                EncParams.bDataPartitioning = M4OSA_TRUE;
            }
            break;

        case M4SYS_kH264:
            M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: M4SYS_H264");

            EncParams.Format = M4ENCODER_kH264;

            EncParams.uiStartingQuantizerValue = 10;
            EncParams.uiRateFactor = 1; /* default */

            EncParams.bErrorResilience = M4OSA_FALSE;
            EncParams.bDataPartitioning = M4OSA_FALSE;
            //EncParams.FrameRate = M4VIDEOEDITING_k5_FPS;
            break;

        default:
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intCreateVideoEncoder: Unknown videoStreamType 0x%x",
                pC->ewc.VideoStreamType);
            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
    }

    /* In case of EMP we overwrite certain parameters */
    if( M4OSA_TRUE == pC->ewc.bActivateEmp )
    {
        EncParams.uiHorizontalSearchRange = 15;    /* set value */
        EncParams.uiVerticalSearchRange = 15;      /* set value */
        EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
        EncParams.uiIVopPeriod = 15; /* one I frame every 15 frames */
        EncParams.uiMotionEstimationTools = 1; /* M4V_MOTION_EST_TOOLS_NO_4MV */
        EncParams.bAcPrediction = M4OSA_FALSE;     /* no AC prediction */
        EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
        EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
    }

    if( pC->bIsMMS == M4OSA_FALSE )
    {
        /* Compute max bitrate depending on input files bitrates and transitions */
        if( pC->Vstate == M4VSS3GPP_kEditVideoState_TRANSITION )
        {
            /* Max of the two blended files */
            if( pC->pC1->pSettings->ClipProperties.uiVideoBitrate
                > pC->pC2->pSettings->ClipProperties.uiVideoBitrate )
                EncParams.Bitrate =
                    pC->pC1->pSettings->ClipProperties.uiVideoBitrate;
            else
                EncParams.Bitrate =
                    pC->pC2->pSettings->ClipProperties.uiVideoBitrate;
        }
        else
        {
            /* Same as input file */
            EncParams.Bitrate =
                pC->pC1->pSettings->ClipProperties.uiVideoBitrate;
        }
    }
    else
    {
        EncParams.Bitrate = pC->uiMMSVideoBitrate; /* RC */
        EncParams.uiTimeScale = 0; /* We let the encoder choose the timescale */
    }

    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctInit");
    /**
    * Init the video encoder (advanced settings version of the encoder Open function) */
    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext,
        &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC,
        pC->ShellAPI.pCurrentVideoEncoderExternalAPI,
        pC->ShellAPI.pCurrentVideoEncoderUserData);

    if( M4NO_ERROR != err )
    {
        M4OSA_TRACE1_1(
            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctInit returns 0x%x",
            err);
        return err;
    }

    /* Encoder state ladder: Closed (after init) -> Stopped (after open)
       -> Running (after start). The destroy path walks it back down. */
    pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctOpen");

    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext,
        &pC->ewc.WriterVideoAU, &EncParams);

    if( M4NO_ERROR != err )
    {
        M4OSA_TRACE1_1(
            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctOpen returns 0x%x",
            err);
        return err;
    }

    pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
    M4OSA_TRACE1_0(
        "M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctStart");

    /* pFctStart is optional in the encoder shell interface */
    if( M4OSA_NULL != pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart )
    {
        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart(
            pC->ewc.pEncContext);

        if( M4NO_ERROR != err )
        {
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctStart returns 0x%x",
                err);
            return err;
        }
    }

    pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning;

    /**
    * Return */
    M4OSA_TRACE3_0("M4VSS3GPP_intCreateVideoEncoder: returning M4NO_ERROR");
    return M4NO_ERROR;
}

/**
 ******************************************************************************
 * M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder()
 * @brief    Destroy the video encoder
 * @note     Walks the encoder state back down (stop, close, cleanup) and
 *           resets pC->ewc.pEncContext. Intermediate failures are traced
 *           but do not abort the teardown.
 * @param    pC    (IN/OUT)  Internal edit context
 * @return   The last error raised during teardown, or M4NO_ERROR
 ******************************************************************************
 */
M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder( M4VSS3GPP_InternalEditContext *pC )
{
    M4OSA_ERR err = M4NO_ERROR;

    if( M4OSA_NULL != pC->ewc.pEncContext )
    {
        if(
            M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState )
        {
            /* Stop is optional in the encoder shell interface */
            if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
            {
                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop(
                    pC->ewc.pEncContext);

                if( M4NO_ERROR != err )
                {
                    M4OSA_TRACE1_1(
                        "M4VSS3GPP_intDestroyVideoEncoder:\
                        pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
                        err);
                    /* Well... how the heck do you handle a failed cleanup? */
                }
            }

            pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
        }

        /* Has the encoder actually been opened? Don't close it if that's not the case. */
        if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState )
        {
            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose(
                pC->ewc.pEncContext);

            if( M4NO_ERROR != err )
            {
                M4OSA_TRACE1_1(
                    "M4VSS3GPP_intDestroyVideoEncoder:\
                    pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
                    err);
                /* Well... how the heck do you handle a failed cleanup?
                 */
            }

            pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
        }

        /* Cleanup is always attempted once a context exists; a failure here
           is traced but not fatal because the context must still be reset. */
        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup(
            pC->ewc.pEncContext);

        if( M4NO_ERROR != err )
        {
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intDestroyVideoEncoder:\
                pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!",
                err);
            /**< We do not return the error here because we still have stuff to free */
        }

        pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
        /**
        * Reset variable */
        pC->ewc.pEncContext = M4OSA_NULL;
    }

    M4OSA_TRACE3_1("M4VSS3GPP_intDestroyVideoEncoder: returning 0x%x", err);
    return err;
}

/**
 ******************************************************************************
 * M4OSA_Void M4VSS3GPP_intSetH263TimeCounter()
 * @brief    Modify the time counter of the given H263 video AU
 * @note     Patches the 8-bit temporal reference in place in the AU header.
 * @param    pAuDataBuffer    (IN/OUT) H263 Video AU to modify
 * @param    uiCts            (IN)     New time counter value
 * @return   nothing
 ******************************************************************************
 */
static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
    M4OSA_UInt8 uiCts )
{
    /*
     * The H263 time counter is 8 bits located on the "x" below:
     *
     *   |--------|--------|--------|--------|
     *    ???????? ???????? ??????xx xxxxxx??
     */

    /**
    * Write the 2 bits on the third byte (high bits of the counter) */
    pAuDataBuffer[2] = ( pAuDataBuffer[2] & 0xFC) | (( uiCts >> 6) & 0x3);

    /**
    * Write the 6 bits on the fourth byte (low bits of the counter) */
    pAuDataBuffer[3] = ( ( uiCts << 2) & 0xFC) | (pAuDataBuffer[3] & 0x3);

    return;
}

/**
 ******************************************************************************
 * M4OSA_Void M4VSS3GPP_intSetMPEG4Gov()
 * @brief    Modify the time info from Group Of VOP video AU
 * @note     Rewrites the hh:mm:ss fields of the 18-bit GOV time code in place.
 * @param    pAuDataBuffer    (IN) MPEG4 Video AU to modify
 * @param    uiCtsSec         (IN) New GOV time info in second unit
 * @return   nothing
 ******************************************************************************
 */
static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
    M4OSA_UInt32 uiCtsSec )
{
    /*
     * The MPEG-4 time code length is 18 bits:
     *
     *     hh      mm    marker    ss
     *   xxxxx|xxx xxx     1    xxxx xx ??????
     *   |----- ---|--- - ----|-- ------|
     */
    M4OSA_UInt8 uiHh;
    M4OSA_UInt8 uiMm;
    M4OSA_UInt8 uiSs;
    M4OSA_UInt8 uiTmp;

    /**
    * Write the 2 last bits ss */
    uiSs = (M4OSA_UInt8)(uiCtsSec % 60); /**< modulo part */
    pAuDataBuffer[2] = (( ( uiSs & 0x03) << 6) | (pAuDataBuffer[2] & 0x3F));

    if( uiCtsSec < 60 )
    {
        /**
        * Write the 3 last bits of mm, the marker bit (0x10 */
        pAuDataBuffer[1] = (( 0x10) | (uiSs >> 2));

        /**
        * Write the 5 bits of hh and 3 of mm (out of 6) */
        pAuDataBuffer[0] = 0;
    }
    else
    {
        /**
        * Write the 3 last bits of mm, the marker bit (0x10 */
        uiTmp = (M4OSA_UInt8)(uiCtsSec / 60); /**< integer part */
        uiMm = (M4OSA_UInt8)(uiTmp % 60);
        pAuDataBuffer[1] = (( uiMm << 5) | (0x10) | (uiSs >> 2));

        if( uiTmp < 60 )
        {
            /**
            * Write the 5 bits of hh and 3 of mm (out of 6) */
            pAuDataBuffer[0] = ((uiMm >> 3));
        }
        else
        {
            /**
            * Write the 5 bits of hh and 3 of mm (out of 6) */
            uiHh = (M4OSA_UInt8)(uiTmp / 60);
            pAuDataBuffer[0] = (( uiHh << 3) | (uiMm >> 3));
        }
    }
    return;
}

/**
 ******************************************************************************
 * M4OSA_Void M4VSS3GPP_intGetMPEG4Gov()
 * @brief    Get the time info from Group Of VOP video AU
 * @note     Decodes the 18-bit GOV time code (hh:mm:ss) into seconds.
 * @param    pAuDataBuffer    (IN)  MPEG4 Video AU to modify
 * @param    pCtsSec          (OUT) Current GOV time info in second unit
 * @return   nothing
 ******************************************************************************
 */
static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
    M4OSA_UInt32 *pCtsSec )
{
    /*
     * The MPEG-4 time code length is 18 bits:
     *
     *     hh      mm    marker    ss
     *   xxxxx|xxx xxx     1    xxxx xx ??????
     *   |----- ---|--- - ----|-- ------|
     */
    M4OSA_UInt8 uiHh;
    M4OSA_UInt8 uiMm;
    M4OSA_UInt8 uiSs;
    M4OSA_UInt8 uiTmp;
    M4OSA_UInt32 uiCtsSec;

    /**
    * Read ss (2 bits from byte 2, 4 bits from byte 1) */
    uiSs = (( pAuDataBuffer[2] & 0xC0) >> 6);
    uiTmp = (( pAuDataBuffer[1] & 0x0F) << 2);
    uiCtsSec = uiSs + uiTmp;

    /**
    * Read mm (3 bits from byte 1, 3 bits from byte 0) */
    uiMm = (( pAuDataBuffer[1] & 0xE0) >> 5);
    uiTmp = (( pAuDataBuffer[0] & 0x07) << 3);
    uiMm = uiMm + uiTmp;
    uiCtsSec = ( uiMm * 60) + uiCtsSec;

    /**
    * Read hh (top 5 bits of byte 0) */
    uiHh = (( pAuDataBuffer[0] & 0xF8) >> 3);

    if( uiHh )
    {
        uiCtsSec = ( uiHh * 3600) + uiCtsSec;
    }

    /*
    * in sec */
    *pCtsSec = uiCtsSec;

    return;
}

/**
 ******************************************************************************
 * M4OSA_ERR M4VSS3GPP_intAllocateYUV420()
 * @brief   Allocate the three YUV 4:2:0 planes
 * @note
 * @param   pPlanes   (IN/OUT) valid pointer to 3 M4VIFI_ImagePlane structures
 * @param   uiWidth   (IN) Image width
 * @param   uiHeight  (IN) Image height
2499 ****************************************************************************** 2500 */ 2501static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes, 2502 M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight ) 2503{ 2504 2505 pPlanes[0].u_width = uiWidth; 2506 pPlanes[0].u_height = uiHeight; 2507 pPlanes[0].u_stride = uiWidth; 2508 pPlanes[0].u_topleft = 0; 2509 pPlanes[0].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[0].u_stride 2510 * pPlanes[0].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[0].pac_data"); 2511 2512 if( M4OSA_NULL == pPlanes[0].pac_data ) 2513 { 2514 M4OSA_TRACE1_0( 2515 "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[0].pac_data,\ 2516 returning M4ERR_ALLOC"); 2517 return M4ERR_ALLOC; 2518 } 2519 2520 pPlanes[1].u_width = pPlanes[0].u_width >> 1; 2521 pPlanes[1].u_height = pPlanes[0].u_height >> 1; 2522 pPlanes[1].u_stride = pPlanes[1].u_width; 2523 pPlanes[1].u_topleft = 0; 2524 pPlanes[1].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[1].u_stride 2525 * pPlanes[1].u_height, M4VSS3GPP,(M4OSA_Char *) "pPlanes[1].pac_data"); 2526 2527 if( M4OSA_NULL == pPlanes[1].pac_data ) 2528 { 2529 M4OSA_TRACE1_0( 2530 "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[1].pac_data,\ 2531 returning M4ERR_ALLOC"); 2532 return M4ERR_ALLOC; 2533 } 2534 2535 pPlanes[2].u_width = pPlanes[1].u_width; 2536 pPlanes[2].u_height = pPlanes[1].u_height; 2537 pPlanes[2].u_stride = pPlanes[2].u_width; 2538 pPlanes[2].u_topleft = 0; 2539 pPlanes[2].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[2].u_stride 2540 * pPlanes[2].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[2].pac_data"); 2541 2542 if( M4OSA_NULL == pPlanes[2].pac_data ) 2543 { 2544 M4OSA_TRACE1_0( 2545 "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[2].pac_data,\ 2546 returning M4ERR_ALLOC"); 2547 return M4ERR_ALLOC; 2548 } 2549 2550 /** 2551 * Return */ 2552 M4OSA_TRACE3_0("M4VSS3GPP_intAllocateYUV420: returning M4NO_ERROR"); 2553 return M4NO_ERROR; 2554} 2555