1/* 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 3 * 4 * Use of this source code is governed by a BSD-style license 5 * that can be found in the LICENSE file in the root of the source 6 * tree. An additional intellectual property rights grant can be found 7 * in the file PATENTS. All contributing project authors may 8 * be found in the AUTHORS file in the root of the source tree. 9 */ 10 11#ifndef WEBRTC_COMMON_TYPES_H_ 12#define WEBRTC_COMMON_TYPES_H_ 13 14#include <stddef.h> 15#include <string.h> 16 17#include <string> 18#include <vector> 19 20#include "webrtc/typedefs.h" 21 22#if defined(_MSC_VER) 23// Disable "new behavior: elements of array will be default initialized" 24// warning. Affects OverUseDetectorOptions. 25#pragma warning(disable:4351) 26#endif 27 28#ifdef WEBRTC_EXPORT 29#define WEBRTC_DLLEXPORT _declspec(dllexport) 30#elif WEBRTC_DLL 31#define WEBRTC_DLLEXPORT _declspec(dllimport) 32#else 33#define WEBRTC_DLLEXPORT 34#endif 35 36#ifndef NULL 37#define NULL 0 38#endif 39 40#define RTP_PAYLOAD_NAME_SIZE 32 41 42#if defined(WEBRTC_WIN) || defined(WIN32) 43// Compares two strings without regard to case. 44#define STR_CASE_CMP(s1, s2) ::_stricmp(s1, s2) 45// Compares characters of two strings without regard to case. 
46#define STR_NCASE_CMP(s1, s2, n) ::_strnicmp(s1, s2, n) 47#else 48#define STR_CASE_CMP(s1, s2) ::strcasecmp(s1, s2) 49#define STR_NCASE_CMP(s1, s2, n) ::strncasecmp(s1, s2, n) 50#endif 51 52namespace webrtc { 53 54class Config; 55 56class InStream 57{ 58public: 59 virtual int Read(void *buf,int len) = 0; 60 virtual int Rewind() {return -1;} 61 virtual ~InStream() {} 62protected: 63 InStream() {} 64}; 65 66class OutStream 67{ 68public: 69 virtual bool Write(const void *buf,int len) = 0; 70 virtual int Rewind() {return -1;} 71 virtual ~OutStream() {} 72protected: 73 OutStream() {} 74}; 75 76enum TraceModule 77{ 78 kTraceUndefined = 0, 79 // not a module, triggered from the engine code 80 kTraceVoice = 0x0001, 81 // not a module, triggered from the engine code 82 kTraceVideo = 0x0002, 83 // not a module, triggered from the utility code 84 kTraceUtility = 0x0003, 85 kTraceRtpRtcp = 0x0004, 86 kTraceTransport = 0x0005, 87 kTraceSrtp = 0x0006, 88 kTraceAudioCoding = 0x0007, 89 kTraceAudioMixerServer = 0x0008, 90 kTraceAudioMixerClient = 0x0009, 91 kTraceFile = 0x000a, 92 kTraceAudioProcessing = 0x000b, 93 kTraceVideoCoding = 0x0010, 94 kTraceVideoMixer = 0x0011, 95 kTraceAudioDevice = 0x0012, 96 kTraceVideoRenderer = 0x0014, 97 kTraceVideoCapture = 0x0015, 98 kTraceRemoteBitrateEstimator = 0x0017, 99}; 100 101enum TraceLevel 102{ 103 kTraceNone = 0x0000, // no trace 104 kTraceStateInfo = 0x0001, 105 kTraceWarning = 0x0002, 106 kTraceError = 0x0004, 107 kTraceCritical = 0x0008, 108 kTraceApiCall = 0x0010, 109 kTraceDefault = 0x00ff, 110 111 kTraceModuleCall = 0x0020, 112 kTraceMemory = 0x0100, // memory info 113 kTraceTimer = 0x0200, // timing info 114 kTraceStream = 0x0400, // "continuous" stream of data 115 116 // used for debug purposes 117 kTraceDebug = 0x0800, // debug 118 kTraceInfo = 0x1000, // debug info 119 120 // Non-verbose level used by LS_INFO of logging.h. Do not use directly. 
121 kTraceTerseInfo = 0x2000, 122 123 kTraceAll = 0xffff 124}; 125 126// External Trace API 127class TraceCallback { 128 public: 129 virtual void Print(TraceLevel level, const char* message, int length) = 0; 130 131 protected: 132 virtual ~TraceCallback() {} 133 TraceCallback() {} 134}; 135 136enum FileFormats 137{ 138 kFileFormatWavFile = 1, 139 kFileFormatCompressedFile = 2, 140 kFileFormatAviFile = 3, 141 kFileFormatPreencodedFile = 4, 142 kFileFormatPcm16kHzFile = 7, 143 kFileFormatPcm8kHzFile = 8, 144 kFileFormatPcm32kHzFile = 9 145}; 146 147enum ProcessingTypes 148{ 149 kPlaybackPerChannel = 0, 150 kPlaybackAllChannelsMixed, 151 kRecordingPerChannel, 152 kRecordingAllChannelsMixed, 153 kRecordingPreprocessing 154}; 155 156enum FrameType 157{ 158 kFrameEmpty = 0, 159 kAudioFrameSpeech = 1, 160 kAudioFrameCN = 2, 161 kVideoFrameKey = 3, // independent frame 162 kVideoFrameDelta = 4, // depends on the previus frame 163}; 164 165// External transport callback interface 166class Transport 167{ 168public: 169 virtual int SendPacket(int channel, const void *data, int len) = 0; 170 virtual int SendRTCPPacket(int channel, const void *data, int len) = 0; 171 172protected: 173 virtual ~Transport() {} 174 Transport() {} 175}; 176 177// Statistics for an RTCP channel 178struct RtcpStatistics { 179 RtcpStatistics() 180 : fraction_lost(0), 181 cumulative_lost(0), 182 extended_max_sequence_number(0), 183 jitter(0) {} 184 185 uint8_t fraction_lost; 186 uint32_t cumulative_lost; 187 uint32_t extended_max_sequence_number; 188 uint32_t jitter; 189}; 190 191// Callback, called whenever a new rtcp report block is transmitted. 192class RtcpStatisticsCallback { 193 public: 194 virtual ~RtcpStatisticsCallback() {} 195 196 virtual void StatisticsUpdated(const RtcpStatistics& statistics, 197 uint32_t ssrc) = 0; 198}; 199 200// Statistics for RTCP packet types. 
struct RtcpPacketTypeCounter {
  RtcpPacketTypeCounter()
      : nack_packets(0),
        fir_packets(0),
        pli_packets(0) {}

  // Accumulates the per-type packet counts from |other| into this counter.
  void Add(const RtcpPacketTypeCounter& other) {
    nack_packets += other.nack_packets;
    fir_packets += other.fir_packets;
    pli_packets += other.pli_packets;
  }

  uint32_t nack_packets;
  uint32_t fir_packets;
  uint32_t pli_packets;
};

// Data usage statistics for a (rtp) stream
struct StreamDataCounters {
  StreamDataCounters()
      : bytes(0),
        header_bytes(0),
        padding_bytes(0),
        packets(0),
        retransmitted_packets(0),
        fec_packets(0) {}

  // TODO(pbos): Rename bytes -> media_bytes.
  uint32_t bytes;  // Payload bytes, excluding RTP headers and padding.
  uint32_t header_bytes;  // Number of bytes used by RTP headers.
  uint32_t padding_bytes;  // Number of padding bytes.
  uint32_t packets;  // Number of packets.
  uint32_t retransmitted_packets;  // Number of retransmitted packets.
  uint32_t fec_packets;  // Number of redundancy packets.
};

// Callback, called whenever byte/packet counts have been updated.
class StreamDataCountersCallback {
 public:
  virtual ~StreamDataCountersCallback() {}

  virtual void DataCountersUpdated(const StreamDataCounters& counters,
                                   uint32_t ssrc) = 0;
};

// Rate statistics for a stream
struct BitrateStatistics {
  BitrateStatistics() : bitrate_bps(0), packet_rate(0), timestamp_ms(0) {}

  uint32_t bitrate_bps;   // Bitrate in bits per second.
  uint32_t packet_rate;   // Packet rate in packets per second.
  uint64_t timestamp_ms;  // Ntp timestamp in ms at time of rate estimation.
};

// Callback, used to notify an observer whenever new rates have been estimated.
256class BitrateStatisticsObserver { 257 public: 258 virtual ~BitrateStatisticsObserver() {} 259 260 virtual void Notify(const BitrateStatistics& stats, uint32_t ssrc) = 0; 261}; 262 263// Callback, used to notify an observer whenever frame counts have been updated 264class FrameCountObserver { 265 public: 266 virtual ~FrameCountObserver() {} 267 virtual void FrameCountUpdated(FrameType frame_type, 268 uint32_t frame_count, 269 const unsigned int ssrc) = 0; 270}; 271 272// Callback, used to notify an observer whenever the send-side delay is updated. 273class SendSideDelayObserver { 274 public: 275 virtual ~SendSideDelayObserver() {} 276 virtual void SendSideDelayUpdated(int avg_delay_ms, 277 int max_delay_ms, 278 uint32_t ssrc) = 0; 279}; 280 281// ================================================================== 282// Voice specific types 283// ================================================================== 284 285// Each codec supported can be described by this structure. 286struct CodecInst { 287 int pltype; 288 char plname[RTP_PAYLOAD_NAME_SIZE]; 289 int plfreq; 290 int pacsize; 291 int channels; 292 int rate; // bits/sec unlike {start,min,max}Bitrate elsewhere in this file! 
293 294 bool operator==(const CodecInst& other) const { 295 return pltype == other.pltype && 296 (STR_CASE_CMP(plname, other.plname) == 0) && 297 plfreq == other.plfreq && 298 pacsize == other.pacsize && 299 channels == other.channels && 300 rate == other.rate; 301 } 302 303 bool operator!=(const CodecInst& other) const { 304 return !(*this == other); 305 } 306}; 307 308// RTP 309enum {kRtpCsrcSize = 15}; // RFC 3550 page 13 310 311enum RTPDirections 312{ 313 kRtpIncoming = 0, 314 kRtpOutgoing 315}; 316 317enum PayloadFrequencies 318{ 319 kFreq8000Hz = 8000, 320 kFreq16000Hz = 16000, 321 kFreq32000Hz = 32000 322}; 323 324enum VadModes // degree of bandwidth reduction 325{ 326 kVadConventional = 0, // lowest reduction 327 kVadAggressiveLow, 328 kVadAggressiveMid, 329 kVadAggressiveHigh // highest reduction 330}; 331 332struct NetworkStatistics // NETEQ statistics 333{ 334 // current jitter buffer size in ms 335 uint16_t currentBufferSize; 336 // preferred (optimal) buffer size in ms 337 uint16_t preferredBufferSize; 338 // adding extra delay due to "peaky jitter" 339 bool jitterPeaksFound; 340 // loss rate (network + late) in percent (in Q14) 341 uint16_t currentPacketLossRate; 342 // late loss rate in percent (in Q14) 343 uint16_t currentDiscardRate; 344 // fraction (of original stream) of synthesized speech inserted through 345 // expansion (in Q14) 346 uint16_t currentExpandRate; 347 // fraction of synthesized speech inserted through pre-emptive expansion 348 // (in Q14) 349 uint16_t currentPreemptiveRate; 350 // fraction of data removed through acceleration (in Q14) 351 uint16_t currentAccelerateRate; 352 // clock-drift in parts-per-million (negative or positive) 353 int32_t clockDriftPPM; 354 // average packet waiting time in the jitter buffer (ms) 355 int meanWaitingTimeMs; 356 // median packet waiting time in the jitter buffer (ms) 357 int medianWaitingTimeMs; 358 // min packet waiting time in the jitter buffer (ms) 359 int minWaitingTimeMs; 360 // max packet 
waiting time in the jitter buffer (ms) 361 int maxWaitingTimeMs; 362 // added samples in off mode due to packet loss 363 int addedSamples; 364}; 365 366// Statistics for calls to AudioCodingModule::PlayoutData10Ms(). 367struct AudioDecodingCallStats { 368 AudioDecodingCallStats() 369 : calls_to_silence_generator(0), 370 calls_to_neteq(0), 371 decoded_normal(0), 372 decoded_plc(0), 373 decoded_cng(0), 374 decoded_plc_cng(0) {} 375 376 int calls_to_silence_generator; // Number of calls where silence generated, 377 // and NetEq was disengaged from decoding. 378 int calls_to_neteq; // Number of calls to NetEq. 379 int decoded_normal; // Number of calls where audio RTP packet decoded. 380 int decoded_plc; // Number of calls resulted in PLC. 381 int decoded_cng; // Number of calls where comfort noise generated due to DTX. 382 int decoded_plc_cng; // Number of calls resulted where PLC faded to CNG. 383}; 384 385typedef struct 386{ 387 int min; // minumum 388 int max; // maximum 389 int average; // average 390} StatVal; 391 392typedef struct // All levels are reported in dBm0 393{ 394 StatVal speech_rx; // long-term speech levels on receiving side 395 StatVal speech_tx; // long-term speech levels on transmitting side 396 StatVal noise_rx; // long-term noise/silence levels on receiving side 397 StatVal noise_tx; // long-term noise/silence levels on transmitting side 398} LevelStatistics; 399 400typedef struct // All levels are reported in dB 401{ 402 StatVal erl; // Echo Return Loss 403 StatVal erle; // Echo Return Loss Enhancement 404 StatVal rerl; // RERL = ERL + ERLE 405 // Echo suppression inside EC at the point just before its NLP 406 StatVal a_nlp; 407} EchoStatistics; 408 409enum NsModes // type of Noise Suppression 410{ 411 kNsUnchanged = 0, // previously set mode 412 kNsDefault, // platform default 413 kNsConference, // conferencing default 414 kNsLowSuppression, // lowest suppression 415 kNsModerateSuppression, 416 kNsHighSuppression, 417 kNsVeryHighSuppression, 
// highest suppression 418}; 419 420enum AgcModes // type of Automatic Gain Control 421{ 422 kAgcUnchanged = 0, // previously set mode 423 kAgcDefault, // platform default 424 // adaptive mode for use when analog volume control exists (e.g. for 425 // PC softphone) 426 kAgcAdaptiveAnalog, 427 // scaling takes place in the digital domain (e.g. for conference servers 428 // and embedded devices) 429 kAgcAdaptiveDigital, 430 // can be used on embedded devices where the capture signal level 431 // is predictable 432 kAgcFixedDigital 433}; 434 435// EC modes 436enum EcModes // type of Echo Control 437{ 438 kEcUnchanged = 0, // previously set mode 439 kEcDefault, // platform default 440 kEcConference, // conferencing default (aggressive AEC) 441 kEcAec, // Acoustic Echo Cancellation 442 kEcAecm, // AEC mobile 443}; 444 445// AECM modes 446enum AecmModes // mode of AECM 447{ 448 kAecmQuietEarpieceOrHeadset = 0, 449 // Quiet earpiece or headset use 450 kAecmEarpiece, // most earpiece use 451 kAecmLoudEarpiece, // Loud earpiece or quiet speakerphone use 452 kAecmSpeakerphone, // most speakerphone use (default) 453 kAecmLoudSpeakerphone // Loud speakerphone 454}; 455 456// AGC configuration 457typedef struct 458{ 459 unsigned short targetLeveldBOv; 460 unsigned short digitalCompressionGaindB; 461 bool limiterEnable; 462} AgcConfig; // AGC configuration parameters 463 464enum StereoChannel 465{ 466 kStereoLeft = 0, 467 kStereoRight, 468 kStereoBoth 469}; 470 471// Audio device layers 472enum AudioLayers 473{ 474 kAudioPlatformDefault = 0, 475 kAudioWindowsWave = 1, 476 kAudioWindowsCore = 2, 477 kAudioLinuxAlsa = 3, 478 kAudioLinuxPulse = 4 479}; 480 481// TODO(henrika): to be removed. 482enum NetEqModes // NetEQ playout configurations 483{ 484 // Optimized trade-off between low delay and jitter robustness for two-way 485 // communication. 486 kNetEqDefault = 0, 487 // Improved jitter robustness at the cost of increased delay. Can be 488 // used in one-way communication. 
489 kNetEqStreaming = 1, 490 // Optimzed for decodability of fax signals rather than for perceived audio 491 // quality. 492 kNetEqFax = 2, 493 // Minimal buffer management. Inserts zeros for lost packets and during 494 // buffer increases. 495 kNetEqOff = 3, 496}; 497 498// TODO(henrika): to be removed. 499enum OnHoldModes // On Hold direction 500{ 501 kHoldSendAndPlay = 0, // Put both sending and playing in on-hold state. 502 kHoldSendOnly, // Put only sending in on-hold state. 503 kHoldPlayOnly // Put only playing in on-hold state. 504}; 505 506// TODO(henrika): to be removed. 507enum AmrMode 508{ 509 kRfc3267BwEfficient = 0, 510 kRfc3267OctetAligned = 1, 511 kRfc3267FileStorage = 2, 512}; 513 514// ================================================================== 515// Video specific types 516// ================================================================== 517 518// Raw video types 519enum RawVideoType 520{ 521 kVideoI420 = 0, 522 kVideoYV12 = 1, 523 kVideoYUY2 = 2, 524 kVideoUYVY = 3, 525 kVideoIYUV = 4, 526 kVideoARGB = 5, 527 kVideoRGB24 = 6, 528 kVideoRGB565 = 7, 529 kVideoARGB4444 = 8, 530 kVideoARGB1555 = 9, 531 kVideoMJPEG = 10, 532 kVideoNV12 = 11, 533 kVideoNV21 = 12, 534 kVideoBGRA = 13, 535 kVideoUnknown = 99 536}; 537 538// Video codec 539enum { kConfigParameterSize = 128}; 540enum { kPayloadNameSize = 32}; 541enum { kMaxSimulcastStreams = 4}; 542enum { kMaxTemporalStreams = 4}; 543 544enum VideoCodecComplexity 545{ 546 kComplexityNormal = 0, 547 kComplexityHigh = 1, 548 kComplexityHigher = 2, 549 kComplexityMax = 3 550}; 551 552enum VideoCodecProfile 553{ 554 kProfileBase = 0x00, 555 kProfileMain = 0x01 556}; 557 558enum VP8ResilienceMode { 559 kResilienceOff, // The stream produced by the encoder requires a 560 // recovery frame (typically a key frame) to be 561 // decodable after a packet loss. 
562 kResilientStream, // A stream produced by the encoder is resilient to 563 // packet losses, but packets within a frame subsequent 564 // to a loss can't be decoded. 565 kResilientFrames // Same as kResilientStream but with added resilience 566 // within a frame. 567}; 568 569// VP8 specific 570struct VideoCodecVP8 { 571 bool pictureLossIndicationOn; 572 bool feedbackModeOn; 573 VideoCodecComplexity complexity; 574 VP8ResilienceMode resilience; 575 unsigned char numberOfTemporalLayers; 576 bool denoisingOn; 577 bool errorConcealmentOn; 578 bool automaticResizeOn; 579 bool frameDroppingOn; 580 int keyFrameInterval; 581 582 bool operator==(const VideoCodecVP8& other) const { 583 return pictureLossIndicationOn == other.pictureLossIndicationOn && 584 feedbackModeOn == other.feedbackModeOn && 585 complexity == other.complexity && 586 resilience == other.resilience && 587 numberOfTemporalLayers == other.numberOfTemporalLayers && 588 denoisingOn == other.denoisingOn && 589 errorConcealmentOn == other.errorConcealmentOn && 590 automaticResizeOn == other.automaticResizeOn && 591 frameDroppingOn == other.frameDroppingOn && 592 keyFrameInterval == other.keyFrameInterval; 593 } 594 595 bool operator!=(const VideoCodecVP8& other) const { 596 return !(*this == other); 597 } 598}; 599 600// H264 specific. 601struct VideoCodecH264 602{ 603 VideoCodecProfile profile; 604 bool frameDroppingOn; 605 int keyFrameInterval; 606 // These are NULL/0 if not externally negotiated. 
607 const uint8_t* spsData; 608 size_t spsLen; 609 const uint8_t* ppsData; 610 size_t ppsLen; 611}; 612 613// Video codec types 614enum VideoCodecType 615{ 616 kVideoCodecVP8, 617 kVideoCodecH264, 618 kVideoCodecI420, 619 kVideoCodecRED, 620 kVideoCodecULPFEC, 621 kVideoCodecGeneric, 622 kVideoCodecUnknown 623}; 624 625union VideoCodecUnion 626{ 627 VideoCodecVP8 VP8; 628 VideoCodecH264 H264; 629}; 630 631 632// Simulcast is when the same stream is encoded multiple times with different 633// settings such as resolution. 634struct SimulcastStream { 635 unsigned short width; 636 unsigned short height; 637 unsigned char numberOfTemporalLayers; 638 unsigned int maxBitrate; // kilobits/sec. 639 unsigned int targetBitrate; // kilobits/sec. 640 unsigned int minBitrate; // kilobits/sec. 641 unsigned int qpMax; // minimum quality 642 643 bool operator==(const SimulcastStream& other) const { 644 return width == other.width && 645 height == other.height && 646 numberOfTemporalLayers == other.numberOfTemporalLayers && 647 maxBitrate == other.maxBitrate && 648 targetBitrate == other.targetBitrate && 649 minBitrate == other.minBitrate && 650 qpMax == other.qpMax; 651 } 652 653 bool operator!=(const SimulcastStream& other) const { 654 return !(*this == other); 655 } 656}; 657 658enum VideoCodecMode { 659 kRealtimeVideo, 660 kScreensharing 661}; 662 663// Common video codec properties 664struct VideoCodec { 665 VideoCodecType codecType; 666 char plName[kPayloadNameSize]; 667 unsigned char plType; 668 669 unsigned short width; 670 unsigned short height; 671 672 unsigned int startBitrate; // kilobits/sec. 673 unsigned int maxBitrate; // kilobits/sec. 674 unsigned int minBitrate; // kilobits/sec. 675 unsigned int targetBitrate; // kilobits/sec. 
676 677 unsigned char maxFramerate; 678 679 VideoCodecUnion codecSpecific; 680 681 unsigned int qpMax; 682 unsigned char numberOfSimulcastStreams; 683 SimulcastStream simulcastStream[kMaxSimulcastStreams]; 684 685 VideoCodecMode mode; 686 687 // When using an external encoder/decoder this allows to pass 688 // extra options without requiring webrtc to be aware of them. 689 Config* extra_options; 690 691 bool operator==(const VideoCodec& other) const { 692 bool ret = codecType == other.codecType && 693 (STR_CASE_CMP(plName, other.plName) == 0) && 694 plType == other.plType && 695 width == other.width && 696 height == other.height && 697 startBitrate == other.startBitrate && 698 maxBitrate == other.maxBitrate && 699 minBitrate == other.minBitrate && 700 targetBitrate == other.targetBitrate && 701 maxFramerate == other.maxFramerate && 702 qpMax == other.qpMax && 703 numberOfSimulcastStreams == other.numberOfSimulcastStreams && 704 mode == other.mode; 705 if (ret && codecType == kVideoCodecVP8) { 706 ret &= (codecSpecific.VP8 == other.codecSpecific.VP8); 707 } 708 709 for (unsigned char i = 0; i < other.numberOfSimulcastStreams && ret; ++i) { 710 ret &= (simulcastStream[i] == other.simulcastStream[i]); 711 } 712 return ret; 713 } 714 715 bool operator!=(const VideoCodec& other) const { 716 return !(*this == other); 717 } 718}; 719 720// Bandwidth over-use detector options. These are used to drive 721// experimentation with bandwidth estimation parameters. 
722// See modules/remote_bitrate_estimator/overuse_detector.h 723struct OverUseDetectorOptions { 724 OverUseDetectorOptions() 725 : initial_slope(8.0/512.0), 726 initial_offset(0), 727 initial_e(), 728 initial_process_noise(), 729 initial_avg_noise(0.0), 730 initial_var_noise(50), 731 initial_threshold(25.0) { 732 initial_e[0][0] = 100; 733 initial_e[1][1] = 1e-1; 734 initial_e[0][1] = initial_e[1][0] = 0; 735 initial_process_noise[0] = 1e-10; 736 initial_process_noise[1] = 1e-2; 737 } 738 double initial_slope; 739 double initial_offset; 740 double initial_e[2][2]; 741 double initial_process_noise[2]; 742 double initial_avg_noise; 743 double initial_var_noise; 744 double initial_threshold; 745}; 746 747// This structure will have the information about when packet is actually 748// received by socket. 749struct PacketTime { 750 PacketTime() : timestamp(-1), not_before(-1) {} 751 PacketTime(int64_t timestamp, int64_t not_before) 752 : timestamp(timestamp), not_before(not_before) { 753 } 754 755 int64_t timestamp; // Receive time after socket delivers the data. 756 int64_t not_before; // Earliest possible time the data could have arrived, 757 // indicating the potential error in the |timestamp| 758 // value,in case the system is busy. 759 // For example, the time of the last select() call. 760 // If unknown, this value will be set to zero. 761}; 762 763struct RTPHeaderExtension { 764 RTPHeaderExtension() 765 : hasTransmissionTimeOffset(false), 766 transmissionTimeOffset(0), 767 hasAbsoluteSendTime(false), 768 absoluteSendTime(0), 769 hasAudioLevel(false), 770 audioLevel(0) {} 771 772 bool hasTransmissionTimeOffset; 773 int32_t transmissionTimeOffset; 774 bool hasAbsoluteSendTime; 775 uint32_t absoluteSendTime; 776 777 // Audio Level includes both level in dBov and voiced/unvoiced bit. 
See: 778 // https://datatracker.ietf.org/doc/draft-lennox-avt-rtp-audio-level-exthdr/ 779 bool hasAudioLevel; 780 uint8_t audioLevel; 781}; 782 783struct RTPHeader { 784 RTPHeader() 785 : markerBit(false), 786 payloadType(0), 787 sequenceNumber(0), 788 timestamp(0), 789 ssrc(0), 790 numCSRCs(0), 791 paddingLength(0), 792 headerLength(0), 793 payload_type_frequency(0), 794 extension() { 795 memset(&arrOfCSRCs, 0, sizeof(arrOfCSRCs)); 796 } 797 798 bool markerBit; 799 uint8_t payloadType; 800 uint16_t sequenceNumber; 801 uint32_t timestamp; 802 uint32_t ssrc; 803 uint8_t numCSRCs; 804 uint32_t arrOfCSRCs[kRtpCsrcSize]; 805 uint8_t paddingLength; 806 uint16_t headerLength; 807 int payload_type_frequency; 808 RTPHeaderExtension extension; 809}; 810 811} // namespace webrtc 812 813#endif // WEBRTC_COMMON_TYPES_H_ 814