M4VSS3GPP_EditVideo.c revision f58e4c332ecf8c0cd6037b010256d0b4c56e6a17
1/* 2 * Copyright (C) 2004-2011 NXP Software 3 * Copyright (C) 2011 The Android Open Source Project 4 * 5 * Licensed under the Apache License, Version 2.0 (the "License"); 6 * you may not use this file except in compliance with the License. 7 * You may obtain a copy of the License at 8 * 9 * http://www.apache.org/licenses/LICENSE-2.0 10 * 11 * Unless required by applicable law or agreed to in writing, software 12 * distributed under the License is distributed on an "AS IS" BASIS, 13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 * See the License for the specific language governing permissions and 15 * limitations under the License. 16 */ 17/** 18 ****************************************************************************** 19 * @file M4VSS3GPP_EditVideo.c 20 * @brief Video Studio Service 3GPP edit API implementation. 21 * @note 22 ****************************************************************************** 23 */ 24 25/****************/ 26/*** Includes ***/ 27/****************/ 28 29#include "NXPSW_CompilerSwitches.h" 30/** 31 * Our header */ 32#include "M4VSS3GPP_API.h" 33#include "M4VSS3GPP_InternalTypes.h" 34#include "M4VSS3GPP_InternalFunctions.h" 35#include "M4VSS3GPP_InternalConfig.h" 36#include "M4VSS3GPP_ErrorCodes.h" 37 38// StageFright encoders require %16 resolution 39#include "M4ENCODER_common.h" 40/** 41 * OSAL headers */ 42#include "M4OSA_Memory.h" /**< OSAL memory management */ 43#include "M4OSA_Debug.h" /**< OSAL debug management */ 44 45/** 46 * component includes */ 47#include "M4VFL_transition.h" /**< video effects */ 48 49/*for transition behaviour*/ 50#include <math.h> 51 52/************************************************************************/ 53/* Static local functions */ 54/************************************************************************/ 55 56static M4OSA_ERR M4VSS3GPP_intCheckVideoMode( 57 M4VSS3GPP_InternalEditContext *pC ); 58static M4OSA_Void 59M4VSS3GPP_intCheckVideoEffects( 
M4VSS3GPP_InternalEditContext *pC,
    M4OSA_UInt8 uiClipNumber );
static M4OSA_ERR
M4VSS3GPP_intApplyVideoEffect( M4VSS3GPP_InternalEditContext *pC,/*M4OSA_UInt8 uiClip1orClip2,*/
    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut );
static M4OSA_ERR
M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC,
    M4VIFI_ImagePlane *pPlaneOut );

static M4OSA_Void
M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC,
    M4SYS_AccessUnit *pAU );
static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
    M4OSA_UInt8 uiCts );
static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
    M4OSA_UInt32 uiCtsSec );
static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
    M4OSA_UInt32 *pCtsSec );
static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes,
    M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight );

/**
 ******************************************************************************
 * M4OSA_ERR M4VSS3GPP_intEditStepVideo()
 * @brief    One step of video processing
 * @note     Dispatches on pC->Vstate:
 *           - READ_WRITE / AFTER_CUT: copy the input AU straight to the writer
 *             (no transcoding);
 *           - DECODE_ENCODE / BEGIN_CUT: decode up to the target CTS and
 *             re-encode one frame (rendering/effects happen in the encoder
 *             callbacks);
 *           - TRANSITION: decode both clips up to the target CTS and blend
 *             them through the encoder callbacks.
 * @param   pC    (IN/OUT) Internal edit context
 * @return   M4NO_ERROR on success, M4VSS3GPP_WAR_EDITING_DONE when the target
 *           output file size is reached, or an error/warning propagated from
 *           the reader, writer or encoder shells.
 ******************************************************************************
 */
M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC )
{
    M4OSA_ERR err;
    M4OSA_Int32 iCts, iNextCts;
    M4ENCODER_FrameMode FrameMode;
    M4OSA_Bool bSkipFrame;
    M4OSA_UInt16 offset;

    /**
    * Check if we reached end cut.
    * Decorrelate input and output encoding
    * timestamp to handle encoder prefetch
    */
    if ( ((M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset
        + pC->iInOutTimeOffset) >= pC->pC1->iEndTime )
    {
        /* Re-adjust video to precise cut time */
        pC->iInOutTimeOffset = ((M4OSA_Int32)(pC->ewc.dInputVidCts))
            - pC->pC1->iVoffset + pC->iInOutTimeOffset - pC->pC1->iEndTime;
        if ( pC->iInOutTimeOffset < 0 ) {
            pC->iInOutTimeOffset = 0;
        }

        /**
        * Video is done for this clip */
        err = M4VSS3GPP_intReachedEndOfVideo(pC);

        /* RC: to know when a file has been processed */
        /* M4VSS3GPP_WAR_SWITCH_CLIP is an expected outcome here, not an error */
        if (M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP)
        {
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intReachedEndOfVideo returns 0x%x",
                err);
        }

        return err;
    }

    /* Don't change the states if we are in decodeUpTo() */
    if ( (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
        && (( pC->pC2 == M4OSA_NULL)
        || (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)) )
    {
        /**
        * Check Video Mode, depending on the current output CTS */
        err = M4VSS3GPP_intCheckVideoMode(
            pC); /**< This function change the pC->Vstate variable! */

        if (M4NO_ERROR != err)
        {
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intCheckVideoMode returns 0x%x!",
                err);
            return err;
        }
    }


    switch( pC->Vstate )
    {
        /* _________________ */
        /*|                 |*/
        /*| READ_WRITE MODE |*/
        /*|_________________|*/

        case M4VSS3GPP_kEditVideoState_READ_WRITE:
        case M4VSS3GPP_kEditVideoState_AFTER_CUT:
            {
                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo READ_WRITE");

                bSkipFrame = M4OSA_FALSE;

                /**
                * If we were decoding the clip, we must jump to be sure
                * to get to the good position. */
                if( M4VSS3GPP_kClipStatus_READ != pC->pC1->Vstatus )
                {
                    /**
                    * Jump to target video time (tc = to-T) */
                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
                    iCts = (M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset;
                    err = pC->pC1->ShellAPI.m_pReader->m_pFctJump(
                        pC->pC1->pReaderContext,
                        (M4_StreamHandler *)pC->pC1->pVideoStream, &iCts);

                    if( M4NO_ERROR != err )
                    {
                        M4OSA_TRACE1_1(
                            "M4VSS3GPP_intEditStepVideo:\
                            READ_WRITE: m_pReader->m_pFctJump(V1) returns 0x%x!",
                            err);
                        return err;
                    }

                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
                        pC->pC1->pReaderContext,
                        (M4_StreamHandler *)pC->pC1->pVideoStream,
                        &pC->pC1->VideoAU);

                    /* M4WAR_NO_MORE_AU is tolerated: handled below via m_size == 0 */
                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
                    {
                        M4OSA_TRACE1_1(
                            "M4VSS3GPP_intEditStepVideo:\
                            READ_WRITE: m_pReader->m_pFctGetNextAu returns 0x%x!",
                            err);
                        return err;
                    }

                    M4OSA_TRACE2_3("A .... read : cts = %.0f + %ld [ 0x%x ]",
                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
                        pC->pC1->VideoAU.m_size);

                    /* This frame has been already written in BEGIN CUT step -> skip it */
                    if( pC->pC1->VideoAU.m_CTS == iCts
                        && pC->pC1->iVideoRenderCts >= iCts )
                    {
                        bSkipFrame = M4OSA_TRUE;
                    }
                }

                /* This frame has been already written in BEGIN CUT step -> skip it */
                if( ( pC->Vstate == M4VSS3GPP_kEditVideoState_AFTER_CUT)
                    && (pC->pC1->VideoAU.m_CTS
                    + pC->pC1->iVoffset <= pC->ewc.WriterVideoAU.CTS) )
                {
                    bSkipFrame = M4OSA_TRUE;
                }

                /**
                * Remember the clip reading state */
                pC->pC1->Vstatus = M4VSS3GPP_kClipStatus_READ;
                // Decorrelate input and output encoding timestamp to handle encoder prefetch
                // Rounding is to compensate reader imprecision (m_CTS is actually an integer)
                iCts = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pC->pC1->iVoffset - 1;
                iNextCts = iCts + ((M4OSA_Int32)pC->dOutputFrameDuration) + 1;

                /* Avoid to write a last frame of duration 0 */
                if( iNextCts > pC->pC1->iEndTime )
                    iNextCts = pC->pC1->iEndTime;

                /**
                * If the AU is good to be written, write it, else just skip it */
                if( ( M4OSA_FALSE == bSkipFrame)
                    && (( pC->pC1->VideoAU.m_CTS >= iCts)
                    && (pC->pC1->VideoAU.m_CTS < iNextCts)
                    && (pC->pC1->VideoAU.m_size > 0)) )
                {
                    /**
                    * Get the output AU to write into */
                    err = pC->ShellAPI.pWriterDataFcts->pStartAU(
                        pC->ewc.p3gpWriterContext,
                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
                        &pC->ewc.WriterVideoAU);

                    if( M4NO_ERROR != err )
                    {
                        M4OSA_TRACE1_1(
                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
                            pWriterDataFcts->pStartAU(Video) returns 0x%x!",
                            err);
                        return err;
                    }

                    /**
                    * Copy the input AU to the output AU */
                    pC->ewc.WriterVideoAU.attribute = pC->pC1->VideoAU.m_attribute;
                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
                    pC->ewc.WriterVideoAU.CTS = (M4OSA_Time)pC->pC1->VideoAU.m_CTS +
                        (M4OSA_Time)pC->pC1->iVoffset;
                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
                    offset = 0;
                    /* for h.264 stream do not read the 1st 4 bytes as they are header
                     indicators */
                    if( pC->pC1->pVideoStream->m_basicProperties.m_streamType
                        == M4DA_StreamTypeVideoMpeg4Avc )
                        offset = 4;

                    pC->ewc.WriterVideoAU.size = pC->pC1->VideoAU.m_size - offset;
                    if( pC->ewc.WriterVideoAU.size > pC->ewc.uiVideoMaxAuSize )
                    {
                        M4OSA_TRACE1_2(
                            "M4VSS3GPP_intEditStepVideo: READ_WRITE: AU size greater than\
                            MaxAuSize (%d>%d)! returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE",
                            pC->ewc.WriterVideoAU.size, pC->ewc.uiVideoMaxAuSize);
                        return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE;
                    }

                    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterVideoAU.dataAddress,
                        (pC->pC1->VideoAU.m_dataAddress + offset),
                        (pC->ewc.WriterVideoAU.size));

                    /**
                    * Update time info for the Counter Time System to be equal to the bit
                    -stream time*/
                    M4VSS3GPP_intUpdateTimeInfo(pC, &pC->ewc.WriterVideoAU);
                    M4OSA_TRACE2_2("B ---- write : cts = %lu [ 0x%x ]",
                        pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size);

                    /**
                    * Write the AU */
                    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
                        pC->ewc.p3gpWriterContext,
                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
                        &pC->ewc.WriterVideoAU);

                    if( M4NO_ERROR != err )
                    {
                        /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
                        file size is reached
                        The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE
                        is returned*/
                        if( M4WAR_WRITER_STOP_REQ == err )
                        {
                            M4OSA_TRACE1_0(
                                "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
                            return M4VSS3GPP_WAR_EDITING_DONE;
                        }
                        else
                        {
                            M4OSA_TRACE1_1(
                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
                                pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
                                err);
                            return err;
                        }
                    }

                    /**
                    * Read next AU for next step */
                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
                        pC->pC1->pReaderContext,
                        (M4_StreamHandler *)pC->pC1->pVideoStream,
                        &pC->pC1->VideoAU);

                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
                    {
                        M4OSA_TRACE1_1(
                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
                            m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
                            err);
                        return err;
                    }

                    M4OSA_TRACE2_3("C .... read : cts = %.0f + %ld [ 0x%x ]",
                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
                        pC->pC1->VideoAU.m_size);
                }
                else
                {
                    /**
                    * Decide whether to read or to increment time increment */
                    if( ( pC->pC1->VideoAU.m_size == 0)
                        || (pC->pC1->VideoAU.m_CTS >= iNextCts) )
                    {
                        /*Increment time by the encoding period (NO_MORE_AU or reader in advance */
                        // Decorrelate input and output encoding timestamp to handle encoder prefetch
                        pC->ewc.dInputVidCts += pC->dOutputFrameDuration;

                        /* Switch (from AFTER_CUT) to normal mode because time is
                        no more frozen */
                        pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
                    }
                    else
                    {
                        /* In other cases (reader late), just let the reader catch up
                         pC->ewc.dVTo */
                        err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
                            pC->pC1->pReaderContext,
                            (M4_StreamHandler *)pC->pC1->pVideoStream,
                            &pC->pC1->VideoAU);

                        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
                        {
                            M4OSA_TRACE1_1(
                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
                                m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
                                err);
                            return err;
                        }

                        M4OSA_TRACE2_3("D .... read : cts = %.0f + %ld [ 0x%x ]",
                            pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
                            pC->pC1->VideoAU.m_size);
                    }
                }
            }
            break;

        /* ____________________ */
        /*|                    |*/
        /*| DECODE_ENCODE MODE |*/
        /*|   BEGIN_CUT MODE   |*/
        /*|____________________|*/

        case M4VSS3GPP_kEditVideoState_DECODE_ENCODE:
        case M4VSS3GPP_kEditVideoState_BEGIN_CUT:
            {
                M4OSA_TRACE3_0(
                    "M4VSS3GPP_intEditStepVideo DECODE_ENCODE / BEGIN_CUT");

                /**
                * Decode the video up to the target time
                (will jump to the previous RAP if needed ) */
                // Decorrelate input and output encoding timestamp to handle encoder prefetch
                err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1, (M4OSA_Int32)pC->ewc.dInputVidCts);
                if( M4NO_ERROR != err )
                {
                    M4OSA_TRACE1_1(
                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
                        M4VSS3GPP_intDecodeVideoUpToCts returns err=0x%x",
                        err);
                    return err;
                }

                /* If the decoding is not completed, do one more step with time frozen */
                if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
                {
                    return M4NO_ERROR;
                }

                /**
                * Reset the video pre-processing error before calling the encoder */
                pC->ewc.VppError = M4NO_ERROR;

                M4OSA_TRACE2_0("E ++++ encode AU");

                /**
                * Encode the frame(rendering,filtering and writing will be done
                in encoder callbacks)*/
                /* BEGIN_CUT forces an I-frame so the cut point is decodable */
                if( pC->Vstate == M4VSS3GPP_kEditVideoState_BEGIN_CUT )
                    FrameMode = M4ENCODER_kIFrame;
                else
                    FrameMode = M4ENCODER_kNormalFrame;

                // Decorrelate input and output encoding timestamp to handle encoder prefetch
                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
                    pC->ewc.dInputVidCts, FrameMode);
                /**
                * Check if we had a VPP error... */
                if( M4NO_ERROR != pC->ewc.VppError )
                {
                    M4OSA_TRACE1_1(
                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
                        pC->ewc.VppError);
#ifdef M4VSS_SUPPORT_OMX_CODECS

                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
                    {
#endif //M4VSS_SUPPORT_OMX_CODECS

                        return pC->ewc.VppError;
#ifdef M4VSS_SUPPORT_OMX_CODECS

                    }

#endif //M4VSS_SUPPORT_OMX_CODECS

                }
                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
                {
                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
                    {
                        M4OSA_TRACE1_0(
                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
                    }
                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
                    file size is reached
                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE
                    is returned*/
                    else if( M4WAR_WRITER_STOP_REQ == err )
                    {
                        M4OSA_TRACE1_0(
                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
                        return M4VSS3GPP_WAR_EDITING_DONE;
                    }
                    else
                    {
                        M4OSA_TRACE1_1(
                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
                            err);
                        return err;
                    }
                }

                /**
                * Increment time by the encoding period (for begin cut, do not increment to not
                loose P-frames) */
                if( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate )
                {
                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
                }
            }
            break;

        /* _________________ */
        /*|                 |*/
        /*| TRANSITION MODE |*/
        /*|_________________|*/

        case M4VSS3GPP_kEditVideoState_TRANSITION:
            {
                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo TRANSITION");

                /* Don't decode more than needed */
                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus)) )
                {
                    /**
                    * Decode the clip1 video up to the target time
                    (will jump to the previous RAP if needed */
                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1,
                        (M4OSA_Int32)pC->ewc.dInputVidCts);
                    if( M4NO_ERROR != err )
                    {
                        M4OSA_TRACE1_1(
                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
                            M4VSS3GPP_intDecodeVideoUpToCts(C1) returns err=0x%x",
                            err);
                        return err;
                    }

                    /* If the decoding is not completed, do one more step with time frozen */
                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
                    {
                        return M4NO_ERROR;
                    }
                }

                /* Don't decode more than needed */
                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)
                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus)) )
                {
                    /**
                    * Decode the clip2 video up to the target time
                    (will jump to the previous RAP if needed) */
                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC2,
                        (M4OSA_Int32)pC->ewc.dInputVidCts);
                    if( M4NO_ERROR != err )
                    {
                        M4OSA_TRACE1_1(
                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
                            M4VSS3GPP_intDecodeVideoUpToCts(C2) returns err=0x%x",
                            err);
                        return err;
                    }

                    /* If the decoding is not completed, do one more step with time frozen */
                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus )
                    {
                        return M4NO_ERROR;
                    }
                }

                /**
                * Reset the video pre-processing error before calling the encoder */
                pC->ewc.VppError = M4NO_ERROR;

                M4OSA_TRACE2_0("F **** blend AUs");

                /**
                * Encode the frame (rendering, filtering and writing will be done
                in encoder callbacks */
                // Decorrelate input and output encoding timestamp to handle encoder prefetch
                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
                    pC->ewc.dInputVidCts, M4ENCODER_kNormalFrame);

                /**
                * If encode returns a process frame error, it is likely to be a VPP error */
                if( M4NO_ERROR != pC->ewc.VppError )
                {
                    M4OSA_TRACE1_1(
                        "M4VSS3GPP_intEditStepVideo: TRANSITION:\
                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
                        pC->ewc.VppError);
#ifdef M4VSS_SUPPORT_OMX_CODECS

                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
                    {

#endif //M4VSS_SUPPORT_OMX_CODECS

                        return pC->ewc.VppError;
#ifdef M4VSS_SUPPORT_OMX_CODECS

                    }

#endif //M4VSS_SUPPORT_OMX_CODECS

                }
                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
                {
                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
                    {
                        M4OSA_TRACE1_0(
                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
                    }

                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
                    file size is reached
                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE is
                    returned*/
                    else if( M4WAR_WRITER_STOP_REQ == err )
                    {
                        M4OSA_TRACE1_0(
                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
                        return M4VSS3GPP_WAR_EDITING_DONE;
                    }
                    else
                    {
                        M4OSA_TRACE1_1(
                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
                            err);
                        return err;
                    }
                }

                /**
                * Increment time by the encoding period */
                // Decorrelate input and output encoding timestamp to handle encoder prefetch
                pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
            }
            break;

        /* ____________ */
        /*|            |*/
        /*| ERROR CASE |*/
        /*|____________|*/

        default:
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intEditStepVideo: invalid internal state (0x%x),\
                returning 
M4VSS3GPP_ERR_INTERNAL_STATE", 627 pC->Vstate); 628 return M4VSS3GPP_ERR_INTERNAL_STATE; 629 } 630 631 /** 632 * Return with no error */ 633 M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo: returning M4NO_ERROR"); 634 return M4NO_ERROR; 635} 636 637/** 638 ****************************************************************************** 639 * M4OSA_ERR M4VSS3GPP_intCheckVideoMode() 640 * @brief Check which video process mode we must use, depending on the output CTS. 641 * @param pC (IN/OUT) Internal edit context 642 ****************************************************************************** 643 */ 644static M4OSA_ERR M4VSS3GPP_intCheckVideoMode( 645 M4VSS3GPP_InternalEditContext *pC ) 646{ 647 M4OSA_ERR err; 648 // Decorrelate input and output encoding timestamp to handle encoder prefetch 649 const M4OSA_Int32 t = (M4OSA_Int32)pC->ewc.dInputVidCts; 650 /**< Transition duration */ 651 const M4OSA_Int32 TD = pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration; 652 653 M4OSA_Int32 iTmp; 654 655 const M4VSS3GPP_EditVideoState previousVstate = pC->Vstate; 656 657 /** 658 * Check if Clip1 is on its begin cut, or in an effect zone */ 659 M4VSS3GPP_intCheckVideoEffects(pC, 1); 660 661 /** 662 * Check if we are in the transition with next clip */ 663 if( ( TD > 0) && (( t - pC->pC1->iVoffset) >= (pC->pC1->iEndTime - TD)) ) 664 { 665 /** 666 * We are in a transition */ 667 pC->Vstate = M4VSS3GPP_kEditVideoState_TRANSITION; 668 pC->bTransitionEffect = M4OSA_TRUE; 669 670 /** 671 * Open second clip for transition, if not yet opened */ 672 if( M4OSA_NULL == pC->pC2 ) 673 { 674 err = M4VSS3GPP_intOpenClip(pC, &pC->pC2, 675 &pC->pClipList[pC->uiCurrentClip + 1]); 676 677 if( M4NO_ERROR != err ) 678 { 679 M4OSA_TRACE1_1( 680 "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_editOpenClip returns 0x%x!", 681 err); 682 return err; 683 } 684 685 /** 686 * Add current video output CTS to the clip offset 687 * (audio output CTS is not yet at the transition, so audio 688 * offset can't be 
updated yet). */ 689 // Decorrelate input and output encoding timestamp to handle encoder prefetch 690 pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts; 691 692 /** 693 * 2005-03-24: BugFix for audio-video synchro: 694 * Update transition duration due to the actual video transition beginning time. 695 * It will avoid desynchronization when doing the audio transition. */ 696 // Decorrelate input and output encoding timestamp to handle encoder prefetch 697 iTmp = ((M4OSA_Int32)pC->ewc.dInputVidCts)\ 698 - (pC->pC1->iEndTime - TD + pC->pC1->iVoffset); 699 if (iTmp < (M4OSA_Int32)pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration) 700 /**< Test in case of a very short transition */ 701 { 702 pC->pTransitionList[pC-> 703 uiCurrentClip].uiTransitionDuration -= iTmp; 704 705 /** 706 * Don't forget to also correct the total duration used for the progress bar 707 * (it was computed with the original transition duration). */ 708 pC->ewc.iOutputDuration += iTmp; 709 } 710 /**< No "else" here because it's hard predict the effect of 0 duration transition...*/ 711 } 712 713 /** 714 * Check effects for clip2 */ 715 M4VSS3GPP_intCheckVideoEffects(pC, 2); 716 } 717 else 718 { 719 /** 720 * We are not in a transition */ 721 pC->bTransitionEffect = M4OSA_FALSE; 722 723 /* If there is an effect we go to decode/encode mode */ 724 if ((pC->nbActiveEffects > 0) ||(pC->nbActiveEffects1 > 0)) 725 { 726 pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE; 727 } 728 /* We do a begin cut, except if already done (time is not progressing because we want 729 to catch all P-frames after the cut) */ 730 else if( M4OSA_TRUE == pC->bClip1AtBeginCut ) 731 { 732 if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate) 733 || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) ) 734 pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT; 735 else 736 pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT; 737 } 738 /* Else we are in default copy/paste mode */ 739 else 740 { 741 if( ( 
M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate) 742 || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) ) 743 { 744 pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT; 745 } 746 else if( pC->bIsMMS == M4OSA_TRUE ) 747 { 748 M4OSA_UInt32 currentBitrate; 749 M4OSA_ERR err = M4NO_ERROR; 750 751 /* Do we need to reencode the video to downgrade the bitrate or not ? */ 752 /* Let's compute the cirrent bitrate of the current edited clip */ 753 err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption( 754 pC->pC1->pReaderContext, 755 M4READER_kOptionID_Bitrate, ¤tBitrate); 756 757 if( err != M4NO_ERROR ) 758 { 759 M4OSA_TRACE1_1( 760 "M4VSS3GPP_intCheckVideoMode:\ 761 Error when getting next bitrate of edited clip: 0x%x", 762 err); 763 return err; 764 } 765 766 /* Remove audio bitrate */ 767 currentBitrate -= 12200; 768 769 /* Test if we go into copy/paste mode or into decode/encode mode */ 770 if( currentBitrate > pC->uiMMSVideoBitrate ) 771 { 772 pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE; 773 } 774 else 775 { 776 pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE; 777 } 778 } 779 else if(!((pC->m_bClipExternalHasStarted == M4OSA_TRUE) && 780 (pC->Vstate == M4VSS3GPP_kEditVideoState_DECODE_ENCODE))) 781 { 782 /** 783 * Test if we go into copy/paste mode or into decode/encode mode 784 * If an external effect has been applied on the current clip 785 * then continue to be in decode/encode mode till end of 786 * clip to avoid H.264 distortion. 
787 */ 788 pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE; 789 } 790 } 791 } 792 793 /** 794 * Check if we create an encoder */ 795 if( ( ( M4VSS3GPP_kEditVideoState_READ_WRITE == previousVstate) 796 || (M4VSS3GPP_kEditVideoState_AFTER_CUT 797 == previousVstate)) /**< read mode */ 798 && (( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate) 799 || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == pC->Vstate) 800 || (M4VSS3GPP_kEditVideoState_TRANSITION 801 == pC->Vstate)) /**< encode mode */ 802 && pC->bIsMMS == M4OSA_FALSE ) 803 { 804 /** 805 * Create the encoder */ 806 err = M4VSS3GPP_intCreateVideoEncoder(pC); 807 808 if( M4NO_ERROR != err ) 809 { 810 M4OSA_TRACE1_1( 811 "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!", 812 err); 813 return err; 814 } 815 } 816 else if( pC->bIsMMS == M4OSA_TRUE && pC->ewc.pEncContext == M4OSA_NULL ) 817 { 818 /** 819 * Create the encoder */ 820 err = M4VSS3GPP_intCreateVideoEncoder(pC); 821 822 if( M4NO_ERROR != err ) 823 { 824 M4OSA_TRACE1_1( 825 "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!", 826 err); 827 return err; 828 } 829 } 830 831 /** 832 * When we go from filtering to read/write, we must act like a begin cut, 833 * because the last filtered image may be different than the original image. 
*/ 834 else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate) 835 || (M4VSS3GPP_kEditVideoState_TRANSITION 836 == previousVstate)) /**< encode mode */ 837 && (M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate) /**< read mode */ 838 ) 839 { 840 pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT; 841 } 842 843 /** 844 * Check if we destroy an encoder */ 845 else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate) 846 || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate) 847 || (M4VSS3GPP_kEditVideoState_TRANSITION 848 == previousVstate)) /**< encode mode */ 849 && (( M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate) 850 || (M4VSS3GPP_kEditVideoState_AFTER_CUT 851 == pC->Vstate)) /**< read mode */ 852 && pC->bIsMMS == M4OSA_FALSE ) 853 { 854 /** 855 * Destroy the previously created encoder */ 856 err = M4VSS3GPP_intDestroyVideoEncoder(pC); 857 858 if( M4NO_ERROR != err ) 859 { 860 M4OSA_TRACE1_1( 861 "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intDestroyVideoEncoder returns 0x%x!", 862 err); 863 return err; 864 } 865 } 866 867 /** 868 * Return with no error */ 869 M4OSA_TRACE3_0("M4VSS3GPP_intCheckVideoMode: returning M4NO_ERROR"); 870 return M4NO_ERROR; 871} 872 873/****************************************************************************** 874 * M4OSA_ERR M4VSS3GPP_intStartAU() 875 * @brief StartAU writer-like interface used for the VSS 3GPP only 876 * @note 877 * @param pContext: (IN) It is the VSS 3GPP context in our case 878 * @param streamID: (IN) Id of the stream to which the Access Unit is related. 879 * @param pAU: (IN/OUT) Access Unit to be prepared. 
880 * @return M4NO_ERROR: there is no error 881 ****************************************************************************** 882 */ 883M4OSA_ERR M4VSS3GPP_intStartAU( M4WRITER_Context pContext, 884 M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU ) 885{ 886 M4OSA_ERR err; 887 M4OSA_UInt32 uiMaxAuSize; 888 889 /** 890 * Given context is actually the VSS3GPP context */ 891 M4VSS3GPP_InternalEditContext *pC = 892 (M4VSS3GPP_InternalEditContext *)pContext; 893 894 /** 895 * Get the output AU to write into */ 896 err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext, 897 M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU); 898 899 if( M4NO_ERROR != err ) 900 { 901 M4OSA_TRACE1_1( 902 "M4VSS3GPP_intStartAU: pWriterDataFcts->pStartAU(Video) returns 0x%x!", 903 err); 904 return err; 905 } 906 907 /** 908 * Return */ 909 M4OSA_TRACE3_0("M4VSS3GPP_intStartAU: returning M4NO_ERROR"); 910 return M4NO_ERROR; 911} 912 913/****************************************************************************** 914 * M4OSA_ERR M4VSS3GPP_intProcessAU() 915 * @brief ProcessAU writer-like interface used for the VSS 3GPP only 916 * @note 917 * @param pContext: (IN) It is the VSS 3GPP context in our case 918 * @param streamID: (IN) Id of the stream to which the Access Unit is related. 
919 * @param pAU: (IN/OUT) Access Unit to be written 920 * @return M4NO_ERROR: there is no error 921 ****************************************************************************** 922 */ 923M4OSA_ERR M4VSS3GPP_intProcessAU( M4WRITER_Context pContext, 924 M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU ) 925{ 926 M4OSA_ERR err; 927 928 /** 929 * Given context is actually the VSS3GPP context */ 930 M4VSS3GPP_InternalEditContext *pC = 931 (M4VSS3GPP_InternalEditContext *)pContext; 932 933 /** 934 * Fix the encoded AU time */ 935 // Decorrelate input and output encoding timestamp to handle encoder prefetch 936 pC->ewc.dOutputVidCts = pAU->CTS; 937 /** 938 * Update time info for the Counter Time System to be equal to the bit-stream time */ 939 M4VSS3GPP_intUpdateTimeInfo(pC, pAU); 940 941 /** 942 * Write the AU */ 943 err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext, 944 M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU); 945 946 if( M4NO_ERROR != err ) 947 { 948 M4OSA_TRACE1_1( 949 "M4VSS3GPP_intProcessAU: pWriterDataFcts->pProcessAU(Video) returns 0x%x!", 950 err); 951 return err; 952 } 953 954 /** 955 * Return */ 956 M4OSA_TRACE3_0("M4VSS3GPP_intProcessAU: returning M4NO_ERROR"); 957 return M4NO_ERROR; 958} 959 960/** 961 ****************************************************************************** 962 * M4OSA_ERR M4VSS3GPP_intVPP() 963 * @brief We implement our own VideoPreProcessing function 964 * @note It is called by the video encoder 965 * @param pContext (IN) VPP context, which actually is the VSS 3GPP context in our case 966 * @param pPlaneIn (IN) 967 * @param pPlaneOut (IN/OUT) Pointer to an array of 3 planes that will contain the output 968 * YUV420 image 969 * @return M4NO_ERROR: No error 970 ****************************************************************************** 971 */ 972M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn, 973 M4VIFI_ImagePlane *pPlaneOut ) 974{ 975 M4OSA_ERR err; 976 M4_MediaTime t; 977 
M4VIFI_ImagePlane *pTmp = M4OSA_NULL; 978 M4VIFI_ImagePlane pTemp1[3],pTemp2[3]; 979 M4OSA_UInt32 i =0; 980 /** 981 * VPP context is actually the VSS3GPP context */ 982 M4VSS3GPP_InternalEditContext *pC = 983 (M4VSS3GPP_InternalEditContext *)pContext; 984 pTemp1[0].pac_data = pTemp2[0].pac_data = M4OSA_NULL; 985 /** 986 * Reset VPP error remembered in context */ 987 pC->ewc.VppError = M4NO_ERROR; 988 989 /** 990 * At the end of the editing, we may be called when no more clip is loaded. 991 * (because to close the encoder properly it must be stepped one or twice...) */ 992 if( M4OSA_NULL == pC->pC1 ) 993 { 994 /** 995 * We must fill the input of the encoder with a dummy image, because 996 * encoding noise leads to a huge video AU, and thus a writer buffer overflow. */ 997 M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[0].pac_data, 998 pPlaneOut[0].u_stride * pPlaneOut[0].u_height, 0); 999 M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[1].pac_data, 1000 pPlaneOut[1].u_stride * pPlaneOut[1].u_height, 0); 1001 M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[2].pac_data, 1002 pPlaneOut[2].u_stride * pPlaneOut[2].u_height, 0); 1003 1004 M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR (abort)"); 1005 return M4NO_ERROR; 1006 } 1007 1008 /** 1009 **************** Transition case ****************/ 1010 if( M4OSA_TRUE == pC->bTransitionEffect ) 1011 { 1012 if (M4OSA_NULL == pTemp1[0].pac_data) 1013 { 1014 err = M4VSS3GPP_intAllocateYUV420(pTemp1, pC->ewc.uiVideoWidth, 1015 pC->ewc.uiVideoHeight); 1016 if (M4NO_ERROR != err) 1017 { 1018 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(1) returns 0x%x, \ 1019 returning M4NO_ERROR", err); 1020 pC->ewc.VppError = err; 1021 return M4NO_ERROR; /**< Return no error to the encoder core 1022 (else it may leak in some situations...) 
*/ 1023 } 1024 } 1025 if (M4OSA_NULL == pTemp2[0].pac_data) 1026 { 1027 err = M4VSS3GPP_intAllocateYUV420(pTemp2, pC->ewc.uiVideoWidth, 1028 pC->ewc.uiVideoHeight); 1029 if (M4NO_ERROR != err) 1030 { 1031 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(2) returns 0x%x, \ 1032 returning M4NO_ERROR", err); 1033 pC->ewc.VppError = err; 1034 return M4NO_ERROR; /**< Return no error to the encoder core 1035 (else it may leak in some situations...) */ 1036 } 1037 } 1038 /** 1039 * We need two intermediate planes */ 1040 if( M4OSA_NULL == pC->yuv1[0].pac_data ) 1041 { 1042 err = M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth, 1043 pC->ewc.uiVideoHeight); 1044 1045 if( M4NO_ERROR != err ) 1046 { 1047 M4OSA_TRACE1_1( 1048 "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\ 1049 returning M4NO_ERROR", 1050 err); 1051 pC->ewc.VppError = err; 1052 return 1053 M4NO_ERROR; /**< Return no error to the encoder core 1054 (else it may leak in some situations...) */ 1055 } 1056 } 1057 1058 if( M4OSA_NULL == pC->yuv2[0].pac_data ) 1059 { 1060 err = M4VSS3GPP_intAllocateYUV420(pC->yuv2, pC->ewc.uiVideoWidth, 1061 pC->ewc.uiVideoHeight); 1062 1063 if( M4NO_ERROR != err ) 1064 { 1065 M4OSA_TRACE1_1( 1066 "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\ 1067 returning M4NO_ERROR", 1068 err); 1069 pC->ewc.VppError = err; 1070 return 1071 M4NO_ERROR; /**< Return no error to the encoder core 1072 (else it may leak in some situations...) 
*/ 1073 } 1074 } 1075 1076 /** 1077 * Allocate new temporary plane if needed */ 1078 if( M4OSA_NULL == pC->yuv3[0].pac_data ) 1079 { 1080 err = M4VSS3GPP_intAllocateYUV420(pC->yuv3, pC->ewc.uiVideoWidth, 1081 pC->ewc.uiVideoHeight); 1082 1083 if( M4NO_ERROR != err ) 1084 { 1085 M4OSA_TRACE1_1( 1086 "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\ 1087 returning M4NO_ERROR", 1088 err); 1089 pC->ewc.VppError = err; 1090 return 1091 M4NO_ERROR; /**< Return no error to the encoder core 1092 (else it may leak in some situations...) */ 1093 } 1094 } 1095 1096 /** 1097 * Compute the time in the clip1 base: t = to - Offset */ 1098 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1099 t = pC->ewc.dInputVidCts - pC->pC1->iVoffset; 1100 1101 /** 1102 * Render Clip1 */ 1103 if( pC->pC1->isRenderDup == M4OSA_FALSE ) 1104 { 1105 if(pC->nbActiveEffects > 0) 1106 { 1107 err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC1->pViDecCtxt, 1108 &t, pTemp1, 1109 M4OSA_TRUE); 1110 if (M4NO_ERROR != err) 1111 { 1112 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C1) returns 0x%x, \ 1113 returning M4NO_ERROR", err); 1114 pC->ewc.VppError = err; 1115 return M4NO_ERROR; /**< Return no error to the encoder core 1116 (else it may leak in some situations...) */ 1117 } 1118 pC->bIssecondClip = M4OSA_FALSE; 1119 err = M4VSS3GPP_intApplyVideoEffect(pC, pTemp1 ,pC->yuv1 ); 1120 if (M4NO_ERROR != err) 1121 { 1122 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x, \ 1123 returning M4NO_ERROR", err); 1124 pC->ewc.VppError = err; 1125 return M4NO_ERROR; /**< Return no error to the encoder core 1126 (else it may leak in some situations...) 
*/ 1127 } 1128 pC->pC1->lastDecodedPlane = pTemp1; 1129 } 1130 else 1131 { 1132 err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC1->pViDecCtxt, 1133 &t, pC->yuv1, 1134 M4OSA_TRUE); 1135 if (M4NO_ERROR != err) 1136 { 1137 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C1) returns 0x%x, \ 1138 returning M4NO_ERROR", err); 1139 pC->ewc.VppError = err; 1140 return M4NO_ERROR; /**< Return no error to the encoder core 1141 (else it may leak in some situations...) */ 1142 } 1143 pC->pC1->lastDecodedPlane = pC->yuv1; 1144 } 1145 pC->pC1->iVideoRenderCts = (M4OSA_Int32)t; 1146 } 1147 else 1148 { 1149 /* Copy last decoded plane to output plane */ 1150 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data, 1151 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[0].pac_data, 1152 (pTmp[0].u_height * pTmp[0].u_width)); 1153 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data, 1154 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[1].pac_data, 1155 (pTmp[1].u_height * pTmp[1].u_width)); 1156 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data, 1157 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[2].pac_data, 1158 (pTmp[2].u_height * pTmp[2].u_width)); 1159 pC->pC1->lastDecodedPlane = pTmp; 1160 } 1161 1162 /** 1163 * Compute the time in the clip2 base: t = to - Offset */ 1164 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1165 t = pC->ewc.dInputVidCts - pC->pC2->iVoffset; 1166 /** 1167 * Render Clip2 */ 1168 if( pC->pC2->isRenderDup == M4OSA_FALSE ) 1169 { 1170 if(pC->nbActiveEffects1 > 0) 1171 { 1172 err = pC->pC2->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC2->pViDecCtxt, 1173 &t, pTemp2, 1174 M4OSA_TRUE); 1175 if (M4NO_ERROR != err) 1176 { 1177 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C2) returns 0x%x, \ 1178 returning M4NO_ERROR", err); 1179 pC->ewc.VppError = err; 1180 return M4NO_ERROR; /**< Return no error to the encoder core 1181 (else it may leak in some situations...) 
*/ 1182 } 1183 1184 pC->bIssecondClip = M4OSA_TRUE; 1185 err = M4VSS3GPP_intApplyVideoEffect(pC, pTemp2 ,pC->yuv2); 1186 if (M4NO_ERROR != err) 1187 { 1188 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x, \ 1189 returning M4NO_ERROR", err); 1190 pC->ewc.VppError = err; 1191 return M4NO_ERROR; /**< Return no error to the encoder core 1192 (else it may leak in some situations...) */ 1193 } 1194 pC->pC2->lastDecodedPlane = pTemp2; 1195 } 1196 else 1197 { 1198 err = pC->pC2->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC2->pViDecCtxt, 1199 &t, pC->yuv2, 1200 M4OSA_TRUE); 1201 if (M4NO_ERROR != err) 1202 { 1203 M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C2) returns 0x%x, \ 1204 returning M4NO_ERROR", err); 1205 pC->ewc.VppError = err; 1206 return M4NO_ERROR; /**< Return no error to the encoder core 1207 (else it may leak in some situations...) */ 1208 } 1209 pC->pC2->lastDecodedPlane = pC->yuv2; 1210 } 1211 pC->pC2->iVideoRenderCts = (M4OSA_Int32)t; 1212 } 1213 else 1214 { 1215 /* Copy last decoded plane to output plane */ 1216 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data, 1217 (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[0].pac_data, 1218 (pTmp[0].u_height * pTmp[0].u_width)); 1219 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data, 1220 (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[1].pac_data, 1221 (pTmp[1].u_height * pTmp[1].u_width)); 1222 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data, 1223 (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[2].pac_data, 1224 (pTmp[2].u_height * pTmp[2].u_width)); 1225 pC->pC2->lastDecodedPlane = pTmp; 1226 } 1227 1228 1229 pTmp = pPlaneOut; 1230 err = M4VSS3GPP_intVideoTransition(pC, pTmp); 1231 1232 if( M4NO_ERROR != err ) 1233 { 1234 M4OSA_TRACE1_1( 1235 "M4VSS3GPP_intVPP: M4VSS3GPP_intVideoTransition returns 0x%x,\ 1236 returning M4NO_ERROR", 1237 err); 1238 pC->ewc.VppError = err; 1239 return M4NO_ERROR; /**< Return no error to the encoder core 1240 (else it may leak in some situations...) 
*/ 1241 } 1242 for (i=0; i < 3; i++) 1243 { 1244 if (pTemp2[i].pac_data != M4OSA_NULL) 1245 { 1246 M4OSA_free((M4OSA_MemAddr32)pTemp2[i].pac_data); 1247 pTemp2[i].pac_data = M4OSA_NULL; 1248 } 1249 1250 1251 if (pTemp1[i].pac_data != M4OSA_NULL) 1252 { 1253 M4OSA_free((M4OSA_MemAddr32)pTemp1[i].pac_data); 1254 pTemp1[i].pac_data = M4OSA_NULL; 1255 } 1256 } 1257 } 1258 /** 1259 **************** No Transition case ****************/ 1260 else 1261 { 1262 /** 1263 * Check if there is a filter */ 1264 if( pC->nbActiveEffects > 0 ) 1265 { 1266 /** 1267 * If we do modify the image, we need an intermediate image plane */ 1268 if( M4OSA_NULL == pC->yuv1[0].pac_data ) 1269 { 1270 err = 1271 M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth, 1272 pC->ewc.uiVideoHeight); 1273 1274 if( M4NO_ERROR != err ) 1275 { 1276 M4OSA_TRACE1_1( 1277 "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420 returns 0x%x,\ 1278 returning M4NO_ERROR", 1279 err); 1280 pC->ewc.VppError = err; 1281 return 1282 M4NO_ERROR; /**< Return no error to the encoder core 1283 (else it may leak in some situations...) 
*/ 1284 } 1285 } 1286 /** 1287 * The image is rendered in the intermediate image plane */ 1288 pTmp = pC->yuv1; 1289 } 1290 else 1291 { 1292 /** 1293 * No filter, the image is directly rendered in pPlaneOut */ 1294 pTmp = pPlaneOut; 1295 } 1296 1297 /** 1298 * Compute the time in the clip base: t = to - Offset */ 1299 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1300 t = pC->ewc.dInputVidCts - pC->pC1->iVoffset; 1301 1302 if( pC->pC1->isRenderDup == M4OSA_FALSE ) 1303 { 1304 err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender( 1305 pC->pC1->pViDecCtxt, &t, pTmp, M4OSA_TRUE); 1306 1307 if( M4NO_ERROR != err ) 1308 { 1309 M4OSA_TRACE1_1( 1310 "M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender returns 0x%x,\ 1311 returning M4NO_ERROR", 1312 err); 1313 pC->ewc.VppError = err; 1314 return 1315 M4NO_ERROR; /**< Return no error to the encoder core 1316 (else it may leak in some situations...) */ 1317 } 1318 pC->pC1->lastDecodedPlane = pTmp; 1319 pC->pC1->iVideoRenderCts = (M4OSA_Int32)t; 1320 } 1321 else 1322 { 1323 /* Copy last decoded plane to output plane */ 1324 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data, 1325 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[0].pac_data, 1326 (pTmp[0].u_height * pTmp[0].u_width)); 1327 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data, 1328 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[1].pac_data, 1329 (pTmp[1].u_height * pTmp[1].u_width)); 1330 M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data, 1331 (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[2].pac_data, 1332 (pTmp[2].u_height * pTmp[2].u_width)); 1333 pC->pC1->lastDecodedPlane = pTmp; 1334 } 1335 1336 M4OSA_TRACE3_1("M4VSS3GPP_intVPP: Rendered at CTS %.3f", t); 1337 1338 /** 1339 * Apply the clip1 effect */ 1340 // if (pC->iClip1ActiveEffect >= 0) 1341 if( pC->nbActiveEffects > 0 ) 1342 { 1343 err = M4VSS3GPP_intApplyVideoEffect(pC,/*1,*/ pC->yuv1, pPlaneOut); 1344 1345 if( M4NO_ERROR != err ) 1346 { 1347 M4OSA_TRACE1_1( 1348 "M4VSS3GPP_intVPP: 
M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x,\ 1349 returning M4NO_ERROR", 1350 err); 1351 pC->ewc.VppError = err; 1352 return 1353 M4NO_ERROR; /**< Return no error to the encoder core 1354 (else it may leak in some situations...) */ 1355 } 1356 } 1357 } 1358 1359 /** 1360 * Return */ 1361 M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR"); 1362 return M4NO_ERROR; 1363} 1364 1365/** 1366 ****************************************************************************** 1367 * M4OSA_ERR M4VSS3GPP_intApplyVideoEffect() 1368 * @brief Apply video effect from pPlaneIn to pPlaneOut 1369 * @param pC (IN/OUT) Internal edit context 1370 * @param uiClip1orClip2 (IN/OUT) 1 for first clip, 2 for second clip 1371 * @param pInputPlanes (IN) Input raw YUV420 image 1372 * @param pOutputPlanes (IN/OUT) Output raw YUV420 image 1373 * @return M4NO_ERROR: No error 1374 ****************************************************************************** 1375 */ 1376static M4OSA_ERR 1377M4VSS3GPP_intApplyVideoEffect( M4VSS3GPP_InternalEditContext *pC, 1378 M4VIFI_ImagePlane *pPlaneIn, 1379 M4VIFI_ImagePlane *pPlaneOut ) 1380{ 1381 M4OSA_ERR err; 1382 1383 M4VSS3GPP_ClipContext *pClip; 1384 M4VSS3GPP_EffectSettings *pFx; 1385 M4VFL_CurtainParam curtainParams; 1386 M4VSS3GPP_ExternalProgress extProgress; 1387 1388 M4OSA_Double VideoEffectTime; 1389 M4OSA_Double PercentageDone; 1390 M4OSA_Int32 tmp; 1391 1392 M4VIFI_ImagePlane *pPlaneTempIn; 1393 M4VIFI_ImagePlane *pPlaneTempOut; 1394 M4OSA_UInt8 i; 1395 M4OSA_UInt8 NumActiveEffects =0; 1396 1397 1398 pClip = pC->pC1; 1399 if (pC->bIssecondClip == M4OSA_TRUE) 1400 { 1401 NumActiveEffects = pC->nbActiveEffects1; 1402 } 1403 else 1404 { 1405 NumActiveEffects = pC->nbActiveEffects; 1406 } 1407 1408 /** 1409 * Allocate temporary plane if needed RC */ 1410 if (M4OSA_NULL == pC->yuv4[0].pac_data && NumActiveEffects > 1) 1411 { 1412 err = M4VSS3GPP_intAllocateYUV420(pC->yuv4, pC->ewc.uiVideoWidth, 1413 pC->ewc.uiVideoHeight); 1414 1415 if( 
M4NO_ERROR != err ) 1416 { 1417 M4OSA_TRACE1_1( 1418 "M4VSS3GPP_intApplyVideoEffect: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\ 1419 returning M4NO_ERROR", 1420 err); 1421 pC->ewc.VppError = err; 1422 return 1423 M4NO_ERROR; /**< Return no error to the encoder core 1424 (else it may leak in some situations...) */ 1425 } 1426 } 1427 1428 if (NumActiveEffects % 2 == 0) 1429 { 1430 pPlaneTempIn = pPlaneIn; 1431 pPlaneTempOut = pC->yuv4; 1432 } 1433 else 1434 { 1435 pPlaneTempIn = pPlaneIn; 1436 pPlaneTempOut = pPlaneOut; 1437 } 1438 1439 for (i=0; i<NumActiveEffects; i++) 1440 { 1441 if (pC->bIssecondClip == M4OSA_TRUE) 1442 { 1443 1444 1445 pFx = &(pC->pEffectsList[pC->pActiveEffectsList1[i]]); 1446 /* Compute how far from the beginning of the effect we are, in clip-base time. */ 1447 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1448 VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) + 1449 pC->pTransitionList[pC->uiCurrentClip]. 1450 uiTransitionDuration- pFx->uiStartTime; 1451 } 1452 else 1453 { 1454 pFx = &(pC->pEffectsList[pC->pActiveEffectsList[i]]); 1455 /* Compute how far from the beginning of the effect we are, in clip-base time. */ 1456 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1457 VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pFx->uiStartTime; 1458 } 1459 1460 1461 1462 /* To calculate %, substract timeIncrement because effect should finish on the last frame*/ 1463 /* which is presented from CTS = eof-timeIncrement till CTS = eof */ 1464 PercentageDone = VideoEffectTime 1465 / ((M4OSA_Float)pFx->uiDuration/*- pC->dOutputFrameDuration*/); 1466 1467 if( PercentageDone < 0.0 ) 1468 PercentageDone = 0.0; 1469 1470 if( PercentageDone > 1.0 ) 1471 PercentageDone = 1.0; 1472 1473 switch( pFx->VideoEffectType ) 1474 { 1475 case M4VSS3GPP_kVideoEffectType_FadeFromBlack: 1476 /** 1477 * Compute where we are in the effect (scale is 0->1024). 
*/ 1478 tmp = (M4OSA_Int32)(PercentageDone * 1024); 1479 1480 /** 1481 * Apply the darkening effect */ 1482 err = 1483 M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn, 1484 (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL); 1485 1486 if( M4NO_ERROR != err ) 1487 { 1488 M4OSA_TRACE1_1( 1489 "M4VSS3GPP_intApplyVideoEffect:\ 1490 M4VFL_modifyLumaWithScale returns error 0x%x,\ 1491 returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", 1492 err); 1493 return M4VSS3GPP_ERR_LUMA_FILTER_ERROR; 1494 } 1495 break; 1496 1497 case M4VSS3GPP_kVideoEffectType_CurtainOpening: 1498 /** 1499 * Compute where we are in the effect (scale is 0->height). 1500 * It is done with floats because tmp x height can be very large 1501 (with long clips).*/ 1502 curtainParams.nb_black_lines = 1503 (M4OSA_UInt16)(( 1.0 - PercentageDone) 1504 * pPlaneTempIn[0].u_height); 1505 /** 1506 * The curtain is hanged on the ceiling */ 1507 curtainParams.top_is_black = 1; 1508 1509 /** 1510 * Apply the curtain effect */ 1511 err = M4VFL_applyCurtain((M4ViComImagePlane *)pPlaneTempIn, 1512 (M4ViComImagePlane *)pPlaneTempOut, &curtainParams, 1513 M4OSA_NULL); 1514 1515 if( M4NO_ERROR != err ) 1516 { 1517 M4OSA_TRACE1_1( 1518 "M4VSS3GPP_intApplyVideoEffect: M4VFL_applyCurtain returns error 0x%x,\ 1519 returning M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR", 1520 err); 1521 return M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR; 1522 } 1523 break; 1524 1525 case M4VSS3GPP_kVideoEffectType_FadeToBlack: 1526 /** 1527 * Compute where we are in the effect (scale is 0->1024) */ 1528 tmp = (M4OSA_Int32)(( 1.0 - PercentageDone) * 1024); 1529 1530 /** 1531 * Apply the darkening effect */ 1532 err = 1533 M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn, 1534 (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL); 1535 1536 if( M4NO_ERROR != err ) 1537 { 1538 M4OSA_TRACE1_1( 1539 "M4VSS3GPP_intApplyVideoEffect:\ 1540 M4VFL_modifyLumaWithScale returns error 0x%x,\ 1541 returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", 1542 err); 1543 
return M4VSS3GPP_ERR_LUMA_FILTER_ERROR; 1544 } 1545 break; 1546 1547 case M4VSS3GPP_kVideoEffectType_CurtainClosing: 1548 /** 1549 * Compute where we are in the effect (scale is 0->height) */ 1550 curtainParams.nb_black_lines = 1551 (M4OSA_UInt16)(PercentageDone * pPlaneTempIn[0].u_height); 1552 1553 /** 1554 * The curtain is hanged on the ceiling */ 1555 curtainParams.top_is_black = 1; 1556 1557 /** 1558 * Apply the curtain effect */ 1559 err = M4VFL_applyCurtain((M4ViComImagePlane *)pPlaneTempIn, 1560 (M4ViComImagePlane *)pPlaneTempOut, &curtainParams, 1561 M4OSA_NULL); 1562 1563 if( M4NO_ERROR != err ) 1564 { 1565 M4OSA_TRACE1_1( 1566 "M4VSS3GPP_intApplyVideoEffect: M4VFL_applyCurtain returns error 0x%x,\ 1567 returning M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR", 1568 err); 1569 return M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR; 1570 } 1571 break; 1572 1573 default: 1574 if( pFx->VideoEffectType 1575 >= M4VSS3GPP_kVideoEffectType_External ) 1576 { 1577 M4OSA_UInt32 Cts = 0; 1578 M4OSA_Int32 nextEffectTime; 1579 1580 /** 1581 * Compute where we are in the effect (scale is 0->1000) */ 1582 tmp = (M4OSA_Int32)(PercentageDone * 1000); 1583 1584 /** 1585 * Set the progress info provided to the external function */ 1586 extProgress.uiProgress = (M4OSA_UInt32)tmp; 1587 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1588 extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts; 1589 extProgress.uiClipTime = extProgress.uiOutputTime - pClip->iVoffset; 1590 extProgress.bIsLast = M4OSA_FALSE; 1591 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1592 nextEffectTime = (M4OSA_Int32)(pC->ewc.dInputVidCts \ 1593 + pC->dOutputFrameDuration); 1594 if(nextEffectTime >= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) 1595 { 1596 extProgress.bIsLast = M4OSA_TRUE; 1597 } 1598 1599 err = pFx->ExtVideoEffectFct(pFx->pExtVideoEffectFctCtxt, 1600 pPlaneTempIn, pPlaneTempOut, &extProgress, 1601 pFx->VideoEffectType 1602 - 
M4VSS3GPP_kVideoEffectType_External); 1603 1604 if( M4NO_ERROR != err ) 1605 { 1606 M4OSA_TRACE1_1( 1607 "M4VSS3GPP_intApplyVideoEffect: \ 1608 External video effect function returns 0x%x!", 1609 err); 1610 return err; 1611 } 1612 break; 1613 } 1614 else 1615 { 1616 M4OSA_TRACE1_1( 1617 "M4VSS3GPP_intApplyVideoEffect: unknown effect type (0x%x),\ 1618 returning M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE", 1619 pFx->VideoEffectType); 1620 return M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE; 1621 } 1622 } 1623 /** 1624 * RC Updates pTempPlaneIn and pTempPlaneOut depending on current effect */ 1625 if (((i % 2 == 0) && (NumActiveEffects % 2 == 0)) 1626 || ((i % 2 != 0) && (NumActiveEffects % 2 != 0))) 1627 { 1628 pPlaneTempIn = pC->yuv4; 1629 pPlaneTempOut = pPlaneOut; 1630 } 1631 else 1632 { 1633 pPlaneTempIn = pPlaneOut; 1634 pPlaneTempOut = pC->yuv4; 1635 } 1636 } 1637 1638 /** 1639 * Return */ 1640 M4OSA_TRACE3_0("M4VSS3GPP_intApplyVideoEffect: returning M4NO_ERROR"); 1641 return M4NO_ERROR; 1642} 1643 1644/** 1645 ****************************************************************************** 1646 * M4OSA_ERR M4VSS3GPP_intVideoTransition() 1647 * @brief Apply video transition effect pC1+pC2->pPlaneOut 1648 * @param pC (IN/OUT) Internal edit context 1649 * @param pOutputPlanes (IN/OUT) Output raw YUV420 image 1650 * @return M4NO_ERROR: No error 1651 ****************************************************************************** 1652 */ 1653static M4OSA_ERR 1654M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC, 1655 M4VIFI_ImagePlane *pPlaneOut ) 1656{ 1657 M4OSA_ERR err; 1658 M4OSA_Int32 iProgress; 1659 M4VSS3GPP_ExternalProgress extProgress; 1660 M4VIFI_ImagePlane *pPlane; 1661 M4OSA_Int32 i; 1662 const M4OSA_Int32 iDur = (M4OSA_Int32)pC-> 1663 pTransitionList[pC->uiCurrentClip].uiTransitionDuration; 1664 1665 /** 1666 * Compute how far from the end cut we are, in clip-base time. 
1667 * It is done with integers because the offset and begin cut have been rounded already. */ 1668 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1669 iProgress = (M4OSA_Int32)((M4OSA_Double)pC->pC1->iEndTime) - pC->ewc.dInputVidCts + 1670 ((M4OSA_Double)pC->pC1->iVoffset); 1671 /** 1672 * We must remove the duration of one frame, else we would almost never reach the end 1673 * (It's kind of a "pile and intervals" issue). */ 1674 iProgress -= (M4OSA_Int32)pC->dOutputFrameDuration; 1675 1676 if( iProgress < 0 ) /**< Sanity checks */ 1677 { 1678 iProgress = 0; 1679 } 1680 1681 /** 1682 * Compute where we are in the transition, on a base 1000 */ 1683 iProgress = ( ( iDur - iProgress) * 1000) / iDur; 1684 1685 /** 1686 * Sanity checks */ 1687 if( iProgress < 0 ) 1688 { 1689 iProgress = 0; 1690 } 1691 else if( iProgress > 1000 ) 1692 { 1693 iProgress = 1000; 1694 } 1695 1696 switch( pC->pTransitionList[pC->uiCurrentClip].TransitionBehaviour ) 1697 { 1698 case M4VSS3GPP_TransitionBehaviour_SpeedUp: 1699 iProgress = ( iProgress * iProgress) / 1000; 1700 break; 1701 1702 case M4VSS3GPP_TransitionBehaviour_Linear: 1703 /*do nothing*/ 1704 break; 1705 1706 case M4VSS3GPP_TransitionBehaviour_SpeedDown: 1707 iProgress = (M4OSA_Int32)(sqrt(iProgress * 1000)); 1708 break; 1709 1710 case M4VSS3GPP_TransitionBehaviour_SlowMiddle: 1711 if( iProgress < 500 ) 1712 { 1713 iProgress = (M4OSA_Int32)(sqrt(iProgress * 500)); 1714 } 1715 else 1716 { 1717 iProgress = 1718 (M4OSA_Int32)(( ( ( iProgress - 500) * (iProgress - 500)) 1719 / 500) + 500); 1720 } 1721 break; 1722 1723 case M4VSS3GPP_TransitionBehaviour_FastMiddle: 1724 if( iProgress < 500 ) 1725 { 1726 iProgress = (M4OSA_Int32)(( iProgress * iProgress) / 500); 1727 } 1728 else 1729 { 1730 iProgress = (M4OSA_Int32)(sqrt(( iProgress - 500) * 500) + 500); 1731 } 1732 break; 1733 1734 default: 1735 /*do nothing*/ 1736 break; 1737 } 1738 1739 switch( 
pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType ) 1740 { 1741 case M4VSS3GPP_kVideoTransitionType_CrossFade: 1742 /** 1743 * Apply the transition effect */ 1744 err = M4VIFI_ImageBlendingonYUV420(M4OSA_NULL, 1745 (M4ViComImagePlane *)pC->yuv1, 1746 (M4ViComImagePlane *)pC->yuv2, 1747 (M4ViComImagePlane *)pPlaneOut, iProgress); 1748 1749 if( M4NO_ERROR != err ) 1750 { 1751 M4OSA_TRACE1_1( 1752 "M4VSS3GPP_intVideoTransition:\ 1753 M4VIFI_ImageBlendingonYUV420 returns error 0x%x,\ 1754 returning M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR", 1755 err); 1756 return M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR; 1757 } 1758 break; 1759 1760 case M4VSS3GPP_kVideoTransitionType_None: 1761 /** 1762 * This is a stupid-non optimized version of the None transition... 1763 * We copy the YUV frame */ 1764 if( iProgress < 500 ) /**< first half of transition */ 1765 { 1766 pPlane = pC->yuv1; 1767 } 1768 else /**< second half of transition */ 1769 { 1770 pPlane = pC->yuv2; 1771 } 1772 /** 1773 * Copy the input YUV frames */ 1774 i = 3; 1775 1776 while( i-- > 0 ) 1777 { 1778 M4OSA_memcpy((M4OSA_MemAddr8)pPlaneOut[i].pac_data, 1779 (M4OSA_MemAddr8)pPlane[i].pac_data, 1780 pPlaneOut[i].u_stride * pPlaneOut[i].u_height); 1781 } 1782 break; 1783 1784 default: 1785 if( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType 1786 >= M4VSS3GPP_kVideoTransitionType_External ) 1787 { 1788 /** 1789 * Set the progress info provided to the external function */ 1790 extProgress.uiProgress = (M4OSA_UInt32)iProgress; 1791 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1792 extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts; 1793 extProgress.uiClipTime = extProgress.uiOutputTime - pC->pC1->iVoffset; 1794 1795 err = pC->pTransitionList[pC-> 1796 uiCurrentClip].ExtVideoTransitionFct( 1797 pC->pTransitionList[pC-> 1798 uiCurrentClip].pExtVideoTransitionFctCtxt, 1799 pC->yuv1, pC->yuv2, pPlaneOut, &extProgress, 1800 pC->pTransitionList[pC-> 1801 
uiCurrentClip].VideoTransitionType 1802 - M4VSS3GPP_kVideoTransitionType_External); 1803 1804 if( M4NO_ERROR != err ) 1805 { 1806 M4OSA_TRACE1_1( 1807 "M4VSS3GPP_intVideoTransition:\ 1808 External video transition function returns 0x%x!", 1809 err); 1810 return err; 1811 } 1812 break; 1813 } 1814 else 1815 { 1816 M4OSA_TRACE1_1( 1817 "M4VSS3GPP_intVideoTransition: unknown transition type (0x%x),\ 1818 returning M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE", 1819 pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType); 1820 return M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE; 1821 } 1822 } 1823 1824 /** 1825 * Return */ 1826 M4OSA_TRACE3_0("M4VSS3GPP_intVideoTransition: returning M4NO_ERROR"); 1827 return M4NO_ERROR; 1828} 1829 1830/** 1831 ****************************************************************************** 1832 * M4OSA_Void M4VSS3GPP_intUpdateTimeInfo() 1833 * @brief Update bit stream time info by Counter Time System to be compliant with 1834 * players using bit stream time info 1835 * @note H263 uses an absolute time counter unlike MPEG4 which uses Group Of Vops 1836 * (GOV, see the standard) 1837 * @param pC (IN/OUT) returns time updated video AU, 1838 * the offset between system and video time (MPEG4 only) 1839 * and the state of the current clip (MPEG4 only) 1840 * @return nothing 1841 ****************************************************************************** 1842 */ 1843static M4OSA_Void 1844M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC, 1845 M4SYS_AccessUnit *pAU ) 1846{ 1847 M4OSA_UInt8 uiTmp; 1848 M4OSA_UInt32 uiCts = 0; 1849 M4OSA_MemAddr8 pTmp; 1850 M4OSA_UInt32 uiAdd; 1851 M4OSA_UInt32 uiCurrGov; 1852 M4OSA_Int8 iDiff; 1853 1854 M4VSS3GPP_ClipContext *pClipCtxt = pC->pC1; 1855 M4OSA_Int32 *pOffset = &(pC->ewc.iMpeg4GovOffset); 1856 1857 /** 1858 * Set H263 time counter from system time */ 1859 if( M4SYS_kH263 == pAU->stream->streamType ) 1860 { 1861 uiTmp = (M4OSA_UInt8)((M4OSA_UInt32)( ( pAU->CTS * 30) / 1001 + 0.5) 
1862 % M4VSS3GPP_EDIT_H263_MODULO_TIME); 1863 M4VSS3GPP_intSetH263TimeCounter((M4OSA_MemAddr8)(pAU->dataAddress), 1864 uiTmp); 1865 } 1866 /* 1867 * Set MPEG4 GOV time counter regarding video and system time */ 1868 else if( M4SYS_kMPEG_4 == pAU->stream->streamType ) 1869 { 1870 /* 1871 * If GOV. 1872 * beware of little/big endian! */ 1873 /* correction: read 8 bits block instead of one 32 bits block */ 1874 M4OSA_UInt8 *temp8 = (M4OSA_UInt8 *)(pAU->dataAddress); 1875 M4OSA_UInt32 temp32 = 0; 1876 1877 temp32 = ( 0x000000ff & (M4OSA_UInt32)(*temp8)) 1878 + (0x0000ff00 & ((M4OSA_UInt32)(*(temp8 + 1))) << 8) 1879 + (0x00ff0000 & ((M4OSA_UInt32)(*(temp8 + 2))) << 16) 1880 + (0xff000000 & ((M4OSA_UInt32)(*(temp8 + 3))) << 24); 1881 1882 M4OSA_TRACE3_2("RC: Temp32: 0x%x, dataAddress: 0x%x\n", temp32, 1883 *(pAU->dataAddress)); 1884 1885 if( M4VSS3GPP_EDIT_GOV_HEADER == temp32 ) 1886 { 1887 pTmp = 1888 (M4OSA_MemAddr8)(pAU->dataAddress 1889 + 1); /**< Jump to the time code (just after the 32 bits header) */ 1890 uiAdd = (M4OSA_UInt32)(pAU->CTS)+( *pOffset); 1891 1892 switch( pClipCtxt->bMpeg4GovState ) 1893 { 1894 case M4OSA_FALSE: /*< INIT */ 1895 { 1896 /* video time = ceil (system time + offset) */ 1897 uiCts = ( uiAdd + 999) / 1000; 1898 1899 /* offset update */ 1900 ( *pOffset) += (( uiCts * 1000) - uiAdd); 1901 1902 /* Save values */ 1903 pClipCtxt->uiMpeg4PrevGovValueSet = uiCts; 1904 1905 /* State to 'first' */ 1906 pClipCtxt->bMpeg4GovState = M4OSA_TRUE; 1907 } 1908 break; 1909 1910 case M4OSA_TRUE: /*< UPDATE */ 1911 { 1912 /* Get current Gov value */ 1913 M4VSS3GPP_intGetMPEG4Gov(pTmp, &uiCurrGov); 1914 1915 /* video time = floor or ceil (system time + offset) */ 1916 uiCts = (uiAdd / 1000); 1917 iDiff = (M4OSA_Int8)(uiCurrGov 1918 - pClipCtxt->uiMpeg4PrevGovValueGet - uiCts 1919 + pClipCtxt->uiMpeg4PrevGovValueSet); 1920 1921 /* ceiling */ 1922 if( iDiff > 0 ) 1923 { 1924 uiCts += (M4OSA_UInt32)(iDiff); 1925 1926 /* offset update */ 1927 ( *pOffset) += (( 
uiCts * 1000) - uiAdd); 1928 } 1929 1930 /* Save values */ 1931 pClipCtxt->uiMpeg4PrevGovValueGet = uiCurrGov; 1932 pClipCtxt->uiMpeg4PrevGovValueSet = uiCts; 1933 } 1934 break; 1935 } 1936 1937 M4VSS3GPP_intSetMPEG4Gov(pTmp, uiCts); 1938 } 1939 } 1940 return; 1941} 1942 1943/** 1944 ****************************************************************************** 1945 * M4OSA_Void M4VSS3GPP_intCheckVideoEffects() 1946 * @brief Check which video effect must be applied at the current time 1947 ****************************************************************************** 1948 */ 1949static M4OSA_Void 1950M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC, 1951 M4OSA_UInt8 uiClipNumber ) 1952{ 1953 M4OSA_UInt8 uiClipIndex; 1954 M4OSA_UInt8 uiFxIndex, i; 1955 M4VSS3GPP_ClipContext *pClip; 1956 M4VSS3GPP_EffectSettings *pFx; 1957 M4OSA_Int32 Off, BC, EC; 1958 // Decorrelate input and output encoding timestamp to handle encoder prefetch 1959 M4OSA_Int32 t = (M4OSA_Int32)pC->ewc.dInputVidCts; 1960 1961 uiClipIndex = pC->uiCurrentClip; 1962 pClip = pC->pC1; 1963 /** 1964 * Shortcuts for code readability */ 1965 Off = pClip->iVoffset; 1966 BC = pClip->iActualVideoBeginCut; 1967 EC = pClip->iEndTime; 1968 1969 i = 0; 1970 1971 for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ ) 1972 { 1973 /** Shortcut, reverse order because of priority between effects(EndEffect always clean )*/ 1974 pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]); 1975 1976 if( M4VSS3GPP_kVideoEffectType_None != pFx->VideoEffectType ) 1977 { 1978 /** 1979 * Check if there is actually a video effect */ 1980 1981 if(uiClipNumber ==1) 1982 { 1983 /**< Are we after the start time of the effect? 1984 * or Are we into the effect duration? 
1985 */ 1986 if ( (t >= (M4OSA_Int32)(pFx->uiStartTime)) && 1987 (t <= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) { 1988 /** 1989 * Set the active effect(s) */ 1990 pC->pActiveEffectsList[i] = pC->nbEffects-1-uiFxIndex; 1991 1992 /** 1993 * Update counter of active effects */ 1994 i++; 1995 1996 /** 1997 * For all external effects set this flag to true. */ 1998 if(pFx->VideoEffectType > M4VSS3GPP_kVideoEffectType_External) 1999 { 2000 pC->m_bClipExternalHasStarted = M4OSA_TRUE; 2001 } 2002 2003 /** 2004 * The third effect has the highest priority, then the 2005 * second one, then the first one. Hence, as soon as we 2006 * found an active effect, we can get out of this loop. 2007 */ 2008 } 2009 } 2010 else 2011 { 2012 /**< Are we into the effect duration? */ 2013 if ( (t + pC->pTransitionList[uiClipIndex].uiTransitionDuration 2014 >= (M4OSA_Int32)(pFx->uiStartTime)) 2015 && (t + pC->pTransitionList[uiClipIndex].uiTransitionDuration 2016 <= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) { 2017 /** 2018 * Set the active effect(s) */ 2019 pC->pActiveEffectsList1[i] = pC->nbEffects-1-uiFxIndex; 2020 2021 /** 2022 * Update counter of active effects */ 2023 i++; 2024 2025 /** 2026 * For all external effects set this flag to true. */ 2027 if(pFx->VideoEffectType > M4VSS3GPP_kVideoEffectType_External) 2028 { 2029 pC->m_bClipExternalHasStarted = M4OSA_TRUE; 2030 } 2031 2032 /** 2033 * The third effect has the highest priority, then the second one, then the first one. 
2034 * Hence, as soon as we found an active effect, we can get out of this loop */ 2035 } 2036 } 2037 } 2038 } 2039 2040 if(1==uiClipNumber) 2041 { 2042 /** 2043 * Save number of active effects */ 2044 pC->nbActiveEffects = i; 2045 } 2046 else 2047 { 2048 pC->nbActiveEffects1 = i; 2049 } 2050 2051 /** 2052 * Change the absolut time to clip related time */ 2053 t -= Off; 2054 2055 /** 2056 * Check if we are on the begin cut (for clip1 only) */ 2057 if( ( 0 != BC) && (t == BC) && (1 == uiClipNumber) ) 2058 { 2059 pC->bClip1AtBeginCut = M4OSA_TRUE; 2060 } 2061 else 2062 { 2063 pC->bClip1AtBeginCut = M4OSA_FALSE; 2064 } 2065 2066 return; 2067} 2068 2069/** 2070 ****************************************************************************** 2071 * M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder() 2072 * @brief Creates the video encoder 2073 * @note 2074 ****************************************************************************** 2075 */ 2076M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder( M4VSS3GPP_InternalEditContext *pC ) 2077{ 2078 M4OSA_ERR err; 2079 M4ENCODER_AdvancedParams EncParams; 2080 2081 /** 2082 * Simulate a writer interface with our specific function */ 2083 pC->ewc.OurWriterDataInterface.pProcessAU = 2084 M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific, 2085 but it follow the writer interface */ 2086 pC->ewc.OurWriterDataInterface.pStartAU = 2087 M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific, 2088 but it follow the writer interface */ 2089 pC->ewc.OurWriterDataInterface.pWriterContext = 2090 (M4WRITER_Context) 2091 pC; /**< We give the internal context as writer context */ 2092 2093 /** 2094 * Get the encoder interface, if not already done */ 2095 if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts ) 2096 { 2097 err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI, 2098 pC->ewc.VideoStreamType); 2099 M4OSA_TRACE1_1( 2100 "M4VSS3GPP_intCreateVideoEncoder: setCurrentEncoder returns 0x%x", 2101 err); 2102 M4ERR_CHECK_RETURN(err); 2103 
} 2104 2105 /** 2106 * Set encoder shell parameters according to VSS settings */ 2107 2108 /* Common parameters */ 2109 EncParams.InputFormat = M4ENCODER_kIYUV420; 2110 EncParams.FrameWidth = pC->ewc.uiVideoWidth; 2111 EncParams.FrameHeight = pC->ewc.uiVideoHeight; 2112 EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale; 2113 2114 if( pC->bIsMMS == M4OSA_FALSE ) 2115 { 2116 /* No strict regulation in video editor */ 2117 /* Because of the effects and transitions we should allow more flexibility */ 2118 /* Also it prevents to drop important frames (with a bad result on sheduling and 2119 block effetcs) */ 2120 EncParams.bInternalRegulation = M4OSA_FALSE; 2121 // Variable framerate is not supported by StageFright encoders 2122 EncParams.FrameRate = M4ENCODER_k30_FPS; 2123 } 2124 else 2125 { 2126 /* In case of MMS mode, we need to enable bitrate regulation to be sure */ 2127 /* to reach the targeted output file size */ 2128 EncParams.bInternalRegulation = M4OSA_TRUE; 2129 EncParams.FrameRate = pC->MMSvideoFramerate; 2130 } 2131 2132 /** 2133 * Other encoder settings (defaults) */ 2134 EncParams.uiHorizontalSearchRange = 0; /* use default */ 2135 EncParams.uiVerticalSearchRange = 0; /* use default */ 2136 EncParams.bErrorResilience = M4OSA_FALSE; /* no error resilience */ 2137 EncParams.uiIVopPeriod = 0; /* use default */ 2138 EncParams.uiMotionEstimationTools = 0; /* M4V_MOTION_EST_TOOLS_ALL */ 2139 EncParams.bAcPrediction = M4OSA_TRUE; /* use AC prediction */ 2140 EncParams.uiStartingQuantizerValue = 10; /* initial QP = 10 */ 2141 EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */ 2142 2143 switch ( pC->ewc.VideoStreamType ) 2144 { 2145 case M4SYS_kH263: 2146 2147 EncParams.Format = M4ENCODER_kH263; 2148 2149 EncParams.uiStartingQuantizerValue = 10; 2150 EncParams.uiRateFactor = 1; /* default */ 2151 2152 EncParams.bErrorResilience = M4OSA_FALSE; 2153 EncParams.bDataPartitioning = M4OSA_FALSE; 2154 break; 2155 2156 case M4SYS_kMPEG_4: 2157 2158 
EncParams.Format = M4ENCODER_kMPEG4; 2159 2160 EncParams.uiStartingQuantizerValue = 8; 2161 EncParams.uiRateFactor = (M4OSA_UInt8)(( pC->dOutputFrameDuration 2162 * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5); 2163 2164 if( EncParams.uiRateFactor == 0 ) 2165 EncParams.uiRateFactor = 1; /* default */ 2166 2167 if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning ) 2168 { 2169 EncParams.bErrorResilience = M4OSA_FALSE; 2170 EncParams.bDataPartitioning = M4OSA_FALSE; 2171 } 2172 else 2173 { 2174 EncParams.bErrorResilience = M4OSA_TRUE; 2175 EncParams.bDataPartitioning = M4OSA_TRUE; 2176 } 2177 break; 2178 2179 case M4SYS_kH264: 2180 M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: M4SYS_H264"); 2181 2182 EncParams.Format = M4ENCODER_kH264; 2183 2184 EncParams.uiStartingQuantizerValue = 10; 2185 EncParams.uiRateFactor = 1; /* default */ 2186 2187 EncParams.bErrorResilience = M4OSA_FALSE; 2188 EncParams.bDataPartitioning = M4OSA_FALSE; 2189 //EncParams.FrameRate = M4VIDEOEDITING_k5_FPS; 2190 break; 2191 2192 default: 2193 M4OSA_TRACE1_1( 2194 "M4VSS3GPP_intCreateVideoEncoder: Unknown videoStreamType 0x%x", 2195 pC->ewc.VideoStreamType); 2196 return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT; 2197 } 2198 2199 /* In case of EMP we overwrite certain parameters */ 2200 if( M4OSA_TRUE == pC->ewc.bActivateEmp ) 2201 { 2202 EncParams.uiHorizontalSearchRange = 15; /* set value */ 2203 EncParams.uiVerticalSearchRange = 15; /* set value */ 2204 EncParams.bErrorResilience = M4OSA_FALSE; /* no error resilience */ 2205 EncParams.uiIVopPeriod = 15; /* one I frame every 15 frames */ 2206 EncParams.uiMotionEstimationTools = 1; /* M4V_MOTION_EST_TOOLS_NO_4MV */ 2207 EncParams.bAcPrediction = M4OSA_FALSE; /* no AC prediction */ 2208 EncParams.uiStartingQuantizerValue = 10; /* initial QP = 10 */ 2209 EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */ 2210 } 2211 2212 if( pC->bIsMMS == M4OSA_FALSE ) 2213 { 2214 /* Compute max bitrate depending on input files bitrates and 
transitions */ 2215 if( pC->Vstate == M4VSS3GPP_kEditVideoState_TRANSITION ) 2216 { 2217#if 0 2218 /* Max of the two blended files */ 2219 if( pC->pC1->pSettings->ClipProperties.uiVideoBitrate 2220 > pC->pC2->pSettings->ClipProperties.uiVideoBitrate ) 2221 EncParams.Bitrate = 2222 pC->pC1->pSettings->ClipProperties.uiVideoBitrate; 2223 else 2224 EncParams.Bitrate = 2225 pC->pC2->pSettings->ClipProperties.uiVideoBitrate; 2226#endif 2227 EncParams.Bitrate = pC->ewc.uiVideoBitrate; 2228 } 2229 else 2230 { 2231 EncParams.Bitrate = pC->ewc.uiVideoBitrate; 2232 } 2233 } 2234 else 2235 { 2236 EncParams.Bitrate = pC->uiMMSVideoBitrate; /* RC */ 2237 EncParams.uiTimeScale = 0; /* We let the encoder choose the timescale */ 2238 } 2239 2240 M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctInit"); 2241 /** 2242 * Init the video encoder (advanced settings version of the encoder Open function) */ 2243 err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext, 2244 &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC, 2245 pC->ShellAPI.pCurrentVideoEncoderExternalAPI, 2246 pC->ShellAPI.pCurrentVideoEncoderUserData); 2247 2248 if( M4NO_ERROR != err ) 2249 { 2250 M4OSA_TRACE1_1( 2251 "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctInit returns 0x%x", 2252 err); 2253 return err; 2254 } 2255 2256 pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed; 2257 M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctOpen"); 2258 2259 err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext, 2260 &pC->ewc.WriterVideoAU, &EncParams); 2261 2262 if( M4NO_ERROR != err ) 2263 { 2264 M4OSA_TRACE1_1( 2265 "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctOpen returns 0x%x", 2266 err); 2267 return err; 2268 } 2269 2270 pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped; 2271 M4OSA_TRACE1_0( 2272 "M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctStart"); 2273 2274 if( M4OSA_NULL != 
pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart ) 2275 { 2276 err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart( 2277 pC->ewc.pEncContext); 2278 2279 if( M4NO_ERROR != err ) 2280 { 2281 M4OSA_TRACE1_1( 2282 "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctStart returns 0x%x", 2283 err); 2284 return err; 2285 } 2286 } 2287 2288 pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning; 2289 2290 /** 2291 * Return */ 2292 M4OSA_TRACE3_0("M4VSS3GPP_intCreateVideoEncoder: returning M4NO_ERROR"); 2293 return M4NO_ERROR; 2294} 2295 2296/** 2297 ****************************************************************************** 2298 * M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder() 2299 * @brief Destroy the video encoder 2300 * @note 2301 ****************************************************************************** 2302 */ 2303M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder( M4VSS3GPP_InternalEditContext *pC ) 2304{ 2305 M4OSA_ERR err = M4NO_ERROR; 2306 2307 if( M4OSA_NULL != pC->ewc.pEncContext ) 2308 { 2309 if( M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState ) 2310 { 2311 if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL ) 2312 { 2313 err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop( 2314 pC->ewc.pEncContext); 2315 2316 if( M4NO_ERROR != err ) 2317 { 2318 M4OSA_TRACE1_1( 2319 "M4VSS3GPP_intDestroyVideoEncoder:\ 2320 pVideoEncoderGlobalFcts->pFctStop returns 0x%x", 2321 err); 2322 /* Well... how the heck do you handle a failed cleanup? */ 2323 } 2324 } 2325 2326 pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped; 2327 } 2328 2329 /* Has the encoder actually been opened? Don't close it if that's not the case. */ 2330 if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState ) 2331 { 2332 err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose( 2333 pC->ewc.pEncContext); 2334 2335 if( M4NO_ERROR != err ) 2336 { 2337 M4OSA_TRACE1_1( 2338 "M4VSS3GPP_intDestroyVideoEncoder:\ 2339 pVideoEncoderGlobalFcts->pFctClose returns 0x%x", 2340 err); 2341 /* Well... 
how the heck do you handle a failed cleanup? */ 2342 } 2343 2344 pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed; 2345 } 2346 2347 err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup( 2348 pC->ewc.pEncContext); 2349 2350 if( M4NO_ERROR != err ) 2351 { 2352 M4OSA_TRACE1_1( 2353 "M4VSS3GPP_intDestroyVideoEncoder:\ 2354 pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!", 2355 err); 2356 /**< We do not return the error here because we still have stuff to free */ 2357 } 2358 2359 pC->ewc.encoderState = M4VSS3GPP_kNoEncoder; 2360 /** 2361 * Reset variable */ 2362 pC->ewc.pEncContext = M4OSA_NULL; 2363 } 2364 2365 M4OSA_TRACE3_1("M4VSS3GPP_intDestroyVideoEncoder: returning 0x%x", err); 2366 return err; 2367} 2368 2369/** 2370 ****************************************************************************** 2371 * M4OSA_Void M4VSS3GPP_intSetH263TimeCounter() 2372 * @brief Modify the time counter of the given H263 video AU 2373 * @note 2374 * @param pAuDataBuffer (IN/OUT) H263 Video AU to modify 2375 * @param uiCts (IN) New time counter value 2376 * @return nothing 2377 ****************************************************************************** 2378 */ 2379static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer, 2380 M4OSA_UInt8 uiCts ) 2381{ 2382 /* 2383 * The H263 time counter is 8 bits located on the "x" below: 2384 * 2385 * |--------|--------|--------|--------| 2386 * ???????? ???????? ??????xx xxxxxx?? 
2387 */ 2388 2389 /** 2390 * Write the 2 bits on the third byte */ 2391 pAuDataBuffer[2] = ( pAuDataBuffer[2] & 0xFC) | (( uiCts >> 6) & 0x3); 2392 2393 /** 2394 * Write the 6 bits on the fourth byte */ 2395 pAuDataBuffer[3] = ( ( uiCts << 2) & 0xFC) | (pAuDataBuffer[3] & 0x3); 2396 2397 return; 2398} 2399 2400/** 2401 ****************************************************************************** 2402 * M4OSA_Void M4VSS3GPP_intSetMPEG4Gov() 2403 * @brief Modify the time info from Group Of VOP video AU 2404 * @note 2405 * @param pAuDataBuffer (IN) MPEG4 Video AU to modify 2406 * @param uiCtsSec (IN) New GOV time info in second unit 2407 * @return nothing 2408 ****************************************************************************** 2409 */ 2410static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer, 2411 M4OSA_UInt32 uiCtsSec ) 2412{ 2413 /* 2414 * The MPEG-4 time code length is 18 bits: 2415 * 2416 * hh mm marker ss 2417 * xxxxx|xxx xxx 1 xxxx xx ?????? 2418 * |----- ---|--- - ----|-- ------| 2419 */ 2420 M4OSA_UInt8 uiHh; 2421 M4OSA_UInt8 uiMm; 2422 M4OSA_UInt8 uiSs; 2423 M4OSA_UInt8 uiTmp; 2424 2425 /** 2426 * Write the 2 last bits ss */ 2427 uiSs = (M4OSA_UInt8)(uiCtsSec % 60); /**< modulo part */ 2428 pAuDataBuffer[2] = (( ( uiSs & 0x03) << 6) | (pAuDataBuffer[2] & 0x3F)); 2429 2430 if( uiCtsSec < 60 ) 2431 { 2432 /** 2433 * Write the 3 last bits of mm, the marker bit (0x10 */ 2434 pAuDataBuffer[1] = (( 0x10) | (uiSs >> 2)); 2435 2436 /** 2437 * Write the 5 bits of hh and 3 of mm (out of 6) */ 2438 pAuDataBuffer[0] = 0; 2439 } 2440 else 2441 { 2442 /** 2443 * Write the 3 last bits of mm, the marker bit (0x10 */ 2444 uiTmp = (M4OSA_UInt8)(uiCtsSec / 60); /**< integer part */ 2445 uiMm = (M4OSA_UInt8)(uiTmp % 60); 2446 pAuDataBuffer[1] = (( uiMm << 5) | (0x10) | (uiSs >> 2)); 2447 2448 if( uiTmp < 60 ) 2449 { 2450 /** 2451 * Write the 5 bits of hh and 3 of mm (out of 6) */ 2452 pAuDataBuffer[0] = ((uiMm >> 3)); 2453 } 2454 else 2455 { 2456 
/** 2457 * Write the 5 bits of hh and 3 of mm (out of 6) */ 2458 uiHh = (M4OSA_UInt8)(uiTmp / 60); 2459 pAuDataBuffer[0] = (( uiHh << 3) | (uiMm >> 3)); 2460 } 2461 } 2462 return; 2463} 2464 2465/** 2466 ****************************************************************************** 2467 * M4OSA_Void M4VSS3GPP_intGetMPEG4Gov() 2468 * @brief Get the time info from Group Of VOP video AU 2469 * @note 2470 * @param pAuDataBuffer (IN) MPEG4 Video AU to modify 2471 * @param pCtsSec (OUT) Current GOV time info in second unit 2472 * @return nothing 2473 ****************************************************************************** 2474 */ 2475static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer, 2476 M4OSA_UInt32 *pCtsSec ) 2477{ 2478 /* 2479 * The MPEG-4 time code length is 18 bits: 2480 * 2481 * hh mm marker ss 2482 * xxxxx|xxx xxx 1 xxxx xx ?????? 2483 * |----- ---|--- - ----|-- ------| 2484 */ 2485 M4OSA_UInt8 uiHh; 2486 M4OSA_UInt8 uiMm; 2487 M4OSA_UInt8 uiSs; 2488 M4OSA_UInt8 uiTmp; 2489 M4OSA_UInt32 uiCtsSec; 2490 2491 /** 2492 * Read ss */ 2493 uiSs = (( pAuDataBuffer[2] & 0xC0) >> 6); 2494 uiTmp = (( pAuDataBuffer[1] & 0x0F) << 2); 2495 uiCtsSec = uiSs + uiTmp; 2496 2497 /** 2498 * Read mm */ 2499 uiMm = (( pAuDataBuffer[1] & 0xE0) >> 5); 2500 uiTmp = (( pAuDataBuffer[0] & 0x07) << 3); 2501 uiMm = uiMm + uiTmp; 2502 uiCtsSec = ( uiMm * 60) + uiCtsSec; 2503 2504 /** 2505 * Read hh */ 2506 uiHh = (( pAuDataBuffer[0] & 0xF8) >> 3); 2507 2508 if( uiHh ) 2509 { 2510 uiCtsSec = ( uiHh * 3600) + uiCtsSec; 2511 } 2512 2513 /* 2514 * in sec */ 2515 *pCtsSec = uiCtsSec; 2516 2517 return; 2518} 2519 2520/** 2521 ****************************************************************************** 2522 * M4OSA_ERR M4VSS3GPP_intAllocateYUV420() 2523 * @brief Allocate the three YUV 4:2:0 planes 2524 * @note 2525 * @param pPlanes (IN/OUT) valid pointer to 3 M4VIFI_ImagePlane structures 2526 * @param uiWidth (IN) Image width 2527 * @param uiHeight(IN) Image height 
2528 ****************************************************************************** 2529 */ 2530static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes, 2531 M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight ) 2532{ 2533 2534 pPlanes[0].u_width = uiWidth; 2535 pPlanes[0].u_height = uiHeight; 2536 pPlanes[0].u_stride = uiWidth; 2537 pPlanes[0].u_topleft = 0; 2538 pPlanes[0].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[0].u_stride 2539 * pPlanes[0].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[0].pac_data"); 2540 2541 if( M4OSA_NULL == pPlanes[0].pac_data ) 2542 { 2543 M4OSA_TRACE1_0( 2544 "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[0].pac_data,\ 2545 returning M4ERR_ALLOC"); 2546 return M4ERR_ALLOC; 2547 } 2548 2549 pPlanes[1].u_width = pPlanes[0].u_width >> 1; 2550 pPlanes[1].u_height = pPlanes[0].u_height >> 1; 2551 pPlanes[1].u_stride = pPlanes[1].u_width; 2552 pPlanes[1].u_topleft = 0; 2553 pPlanes[1].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[1].u_stride 2554 * pPlanes[1].u_height, M4VSS3GPP,(M4OSA_Char *) "pPlanes[1].pac_data"); 2555 2556 if( M4OSA_NULL == pPlanes[1].pac_data ) 2557 { 2558 M4OSA_TRACE1_0( 2559 "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[1].pac_data,\ 2560 returning M4ERR_ALLOC"); 2561 return M4ERR_ALLOC; 2562 } 2563 2564 pPlanes[2].u_width = pPlanes[1].u_width; 2565 pPlanes[2].u_height = pPlanes[1].u_height; 2566 pPlanes[2].u_stride = pPlanes[2].u_width; 2567 pPlanes[2].u_topleft = 0; 2568 pPlanes[2].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[2].u_stride 2569 * pPlanes[2].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[2].pac_data"); 2570 2571 if( M4OSA_NULL == pPlanes[2].pac_data ) 2572 { 2573 M4OSA_TRACE1_0( 2574 "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[2].pac_data,\ 2575 returning M4ERR_ALLOC"); 2576 return M4ERR_ALLOC; 2577 } 2578 2579 /** 2580 * Return */ 2581 M4OSA_TRACE3_0("M4VSS3GPP_intAllocateYUV420: returning M4NO_ERROR"); 2582 return M4NO_ERROR; 2583} 2584