1/*
2 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 *  Use of this source code is governed by a BSD-style license
5 *  that can be found in the LICENSE file in the root of the source
6 *  tree. An additional intellectual property rights grant can be found
7 *  in the file PATENTS.  All contributing project authors may
8 *  be found in the AUTHORS file in the root of the source tree.
9 */
10
11#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
12
13#include <assert.h>
14#include <stdlib.h>
15#include <vector>
16
17#include "webrtc/base/checks.h"
18#include "webrtc/engine_configurations.h"
19#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
20#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
21#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
22#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
23#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
24#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
25#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
26#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
27#include "webrtc/system_wrappers/interface/trace.h"
28#include "webrtc/typedefs.h"
29
30namespace webrtc {
31
32namespace acm2 {
33
// Sentinel value; presumably marks the end of a DTMF/tone event — not
// referenced in this part of the file, TODO confirm against callers.
enum {
  kACMToneEnd = 999
};

// Maximum number of bytes in one packet (PCM16B, 20 ms packets, stereo).
enum {
  kMaxPacketSize = 2560
};

// Maximum number of payloads that can be packed in one RED packet. For
// regular RED, we only pack two payloads. In case of dual-streaming, in worst
// case we might pack 3 payloads in one RED packet.
enum {
  kNumRedFragmentationVectors = 2,
  kMaxNumFragmentationVectors = 3
};

// If packet N arrives, all packets prior to N - |kNackThresholdPackets| that
// have not been received are considered lost, and appear in the NACK list.
enum {
  kNackThresholdPackets = 2
};
56
57namespace {
58
59// TODO(turajs): the same functionality is used in NetEq. If both classes
60// need them, make it a static function in ACMCodecDB.
61bool IsCodecRED(const CodecInst* codec) {
62  return (STR_CASE_CMP(codec->plname, "RED") == 0);
63}
64
65bool IsCodecRED(int index) {
66  return (IsCodecRED(&ACMCodecDB::database_[index]));
67}
68
69bool IsCodecCN(const CodecInst* codec) {
70  return (STR_CASE_CMP(codec->plname, "CN") == 0);
71}
72
73bool IsCodecCN(int index) {
74  return (IsCodecCN(&ACMCodecDB::database_[index]));
75}
76
77// Stereo-to-mono can be used as in-place.
78int DownMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
79  if (length_out_buff < frame.samples_per_channel_) {
80    return -1;
81  }
82  for (int n = 0; n < frame.samples_per_channel_; ++n)
83    out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
84  return 0;
85}
86
87// Mono-to-stereo can be used as in-place.
88int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
89  if (length_out_buff < frame.samples_per_channel_) {
90    return -1;
91  }
92  for (int n = frame.samples_per_channel_ - 1; n >= 0; --n) {
93    out_buff[2 * n + 1] = frame.data_[n];
94    out_buff[2 * n] = frame.data_[n];
95  }
96  return 0;
97}
98
// Return 1 if timestamp t1 is less than timestamp t2, while compensating for
// wrap-around.
static int TimestampLessThan(uint32_t t1, uint32_t t2) {
  // Half of the 32-bit timestamp range (0x7FFFFFFF). A difference smaller
  // than this is interpreted as "no wrap-around happened between the two".
  const uint32_t kHalfFullRange = static_cast<uint32_t>(0xFFFFFFFF) / 2;
  if (t1 == t2)
    return 0;
  if (t1 < t2)
    return (t2 - t1 < kHalfFullRange) ? 1 : 0;
  // t1 > t2 numerically: t1 is "less" only if t2 wrapped around past t1.
  return (t1 - t2 < kHalfFullRange) ? 0 : 1;
}
115
116}  // namespace
117
// Constructs the ACM: initializes all payload-type members to the invalid
// value 255, creates the RED buffer and fragmentation header, scans the
// static codec database for the default RED/CNG payload types, and
// initializes the receiver.
AudioCodingModuleImpl::AudioCodingModuleImpl(
    const AudioCodingModule::Config& config)
    : acm_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      id_(config.id),
      // Arbitrary initial timestamp; codec_timestamp_ below starts from the
      // same value.
      expected_codec_ts_(0xD87F3F9F),
      expected_in_ts_(0xD87F3F9F),
      send_codec_inst_(),
      // 255 is used as "no payload type registered" for RED/CNG below.
      cng_nb_pltype_(255),
      cng_wb_pltype_(255),
      cng_swb_pltype_(255),
      cng_fb_pltype_(255),
      red_pltype_(255),
      vad_enabled_(false),
      dtx_enabled_(false),
      vad_mode_(VADNormal),
      stereo_send_(false),
      current_send_codec_idx_(-1),
      send_codec_registered_(false),
      receiver_(config),
      is_first_red_(true),
      red_enabled_(false),
      last_red_timestamp_(0),
      codec_fec_enabled_(false),
      previous_pltype_(255),
      aux_rtp_header_(NULL),
      receiver_initialized_(false),
      secondary_send_codec_inst_(),
      codec_timestamp_(expected_codec_ts_),
      first_10ms_data_(false),
      callback_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      packetization_callback_(NULL),
      vad_callback_(NULL) {

  // Nullify send codec memory, set payload type and set codec name to
  // invalid values.
  // NOTE(review): strncpy does not null-terminate when the source fills the
  // destination; safe here since |no_name| is shorter than
  // RTP_PAYLOAD_NAME_SIZE - 1 and both codec structs were value-initialized.
  const char no_name[] = "noCodecRegistered";
  strncpy(send_codec_inst_.plname, no_name, RTP_PAYLOAD_NAME_SIZE - 1);
  send_codec_inst_.pltype = -1;

  strncpy(secondary_send_codec_inst_.plname, no_name,
          RTP_PAYLOAD_NAME_SIZE - 1);
  secondary_send_codec_inst_.pltype = -1;

  // No codec instances allocated yet; mirror indices are set when codecs are
  // created.
  for (int i = 0; i < ACMCodecDB::kMaxNumCodecs; i++) {
    codecs_[i] = NULL;
    mirror_codec_idx_[i] = -1;
  }

  // Allocate memory for RED.
  red_buffer_ = new uint8_t[MAX_PAYLOAD_SIZE_BYTE];

  // TODO(turajs): This might not be exactly how this class is supposed to work.
  // The external usage might be that |fragmentationVectorSize| has to match
  // the allocated space for the member-arrays, while here, we allocate
  // according to the maximum number of fragmentations and change
  // |fragmentationVectorSize| on-the-fly based on actual number of
  // fragmentations. However, due to copying to local variable before calling
  // SendData, the RTP module receives a "valid" fragmentation, where allocated
  // space matches |fragmentationVectorSize|, therefore, this should not cause
  // any problem. A better approach is not using RTPFragmentationHeader as
  // member variable, instead, use an ACM-specific structure to hold RED-related
  // data. See module_common_type.h for the definition of
  // RTPFragmentationHeader.
  fragmentation_.VerifyAndAllocateFragmentationHeader(
      kMaxNumFragmentationVectors);

  // Register the default payload type for RED and for CNG at sampling rates of
  // 8, 16, 32 and 48 kHz.
  for (int i = (ACMCodecDB::kNumCodecs - 1); i >= 0; i--) {
    if (IsCodecRED(i)) {
      red_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
    } else if (IsCodecCN(i)) {
      // Map each CN database entry to the CNG payload type for its sampling
      // rate.
      if (ACMCodecDB::database_[i].plfreq == 8000) {
        cng_nb_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
      } else if (ACMCodecDB::database_[i].plfreq == 16000) {
        cng_wb_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
      } else if (ACMCodecDB::database_[i].plfreq == 32000) {
        cng_swb_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
      } else if (ACMCodecDB::database_[i].plfreq == 48000) {
        cng_fb_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
      }
    }
  }

  // Failure is only traced; the object is still constructed.
  if (InitializeReceiverSafe() < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot initialize receiver");
  }
  WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_, "Created");
}
208
// Destroys the ACM: frees all codec instances, the RED buffer, the auxiliary
// RTP header and both critical sections.
AudioCodingModuleImpl::~AudioCodingModuleImpl() {
  {
    CriticalSectionScoped lock(acm_crit_sect_);
    current_send_codec_idx_ = -1;

    for (int i = 0; i < ACMCodecDB::kMaxNumCodecs; i++) {
      if (codecs_[i] != NULL) {
        // Mirror index holds the address of the codec memory.
        // Several |codecs_| slots may share one underlying object via
        // |mirror_codec_idx_|; deleting through the mirror slot and nulling
        // it there avoids a double delete when the aliasing slot is reached.
        assert(mirror_codec_idx_[i] > -1);
        if (codecs_[mirror_codec_idx_[i]] != NULL) {
          delete codecs_[mirror_codec_idx_[i]];
          codecs_[mirror_codec_idx_[i]] = NULL;
        }

        codecs_[i] = NULL;
      }
    }

    if (red_buffer_ != NULL) {
      delete[] red_buffer_;
      red_buffer_ = NULL;
    }
  }

  if (aux_rtp_header_ != NULL) {
    delete aux_rtp_header_;
    aux_rtp_header_ = NULL;
  }

  // The critical sections are deleted last, after all locked regions above
  // have been exited.
  delete callback_crit_sect_;
  callback_crit_sect_ = NULL;

  delete acm_crit_sect_;
  acm_crit_sect_ = NULL;
  WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_,
               "Destroyed");
}
246
247int32_t AudioCodingModuleImpl::ChangeUniqueId(const int32_t id) {
248  {
249    CriticalSectionScoped lock(acm_crit_sect_);
250    id_ = id;
251
252    for (int i = 0; i < ACMCodecDB::kMaxNumCodecs; i++) {
253      if (codecs_[i] != NULL) {
254        codecs_[i]->SetUniqueID(id);
255      }
256    }
257  }
258
259  receiver_.set_id(id_);
260  return 0;
261}
262
263// Returns the number of milliseconds until the module want a
264// worker thread to call Process.
265int32_t AudioCodingModuleImpl::TimeUntilNextProcess() {
266  CriticalSectionScoped lock(acm_crit_sect_);
267
268  if (!HaveValidEncoder("TimeUntilNextProcess")) {
269    return -1;
270  }
271  return codecs_[current_send_codec_idx_]->SamplesLeftToEncode() /
272      (send_codec_inst_.plfreq / 1000);
273}
274
275int32_t AudioCodingModuleImpl::Process() {
276  bool dual_stream;
277  {
278    CriticalSectionScoped lock(acm_crit_sect_);
279    dual_stream = (secondary_encoder_.get() != NULL);
280  }
281  if (dual_stream) {
282    return ProcessDualStream();
283  }
284  return ProcessSingleStream();
285}
286
287int AudioCodingModuleImpl::EncodeFragmentation(int fragmentation_index,
288                                               int payload_type,
289                                               uint32_t current_timestamp,
290                                               ACMGenericCodec* encoder,
291                                               uint8_t* stream) {
292  int16_t len_bytes = MAX_PAYLOAD_SIZE_BYTE;
293  uint32_t rtp_timestamp;
294  WebRtcACMEncodingType encoding_type;
295  if (encoder->Encode(stream, &len_bytes, &rtp_timestamp, &encoding_type) < 0) {
296    return -1;
297  }
298  assert(encoding_type == kActiveNormalEncoded);
299  assert(len_bytes > 0);
300
301  fragmentation_.fragmentationLength[fragmentation_index] = len_bytes;
302  fragmentation_.fragmentationPlType[fragmentation_index] = payload_type;
303  fragmentation_.fragmentationTimeDiff[fragmentation_index] =
304      static_cast<uint16_t>(current_timestamp - rtp_timestamp);
305  fragmentation_.fragmentationVectorSize++;
306  return len_bytes;
307}
308
// Primary payloads are sent immediately, whereas a single secondary payload is
// buffered to be combined with "the next payload."
// Normally "the next payload" would be a primary payload. In case two
// consecutive secondary payloads are generated with no primary payload in
// between, then two secondary payloads are packed in one RED.
int AudioCodingModuleImpl::ProcessDualStream() {
  uint8_t stream[kMaxNumFragmentationVectors * MAX_PAYLOAD_SIZE_BYTE];
  uint32_t current_timestamp;
  int16_t length_bytes = 0;
  RTPFragmentationHeader my_fragmentation;

  uint8_t my_red_payload_type;

  {
    CriticalSectionScoped lock(acm_crit_sect_);
    // Check if there is an encoder before.
    if (!HaveValidEncoder("ProcessDualStream") ||
        secondary_encoder_.get() == NULL) {
      return -1;
    }
    ACMGenericCodec* primary_encoder = codecs_[current_send_codec_idx_];
    // If primary encoder has a full frame of audio to generate payload.
    bool primary_ready_to_encode = primary_encoder->HasFrameToEncode();
    // If the secondary encoder has a frame of audio to generate a payload.
    bool secondary_ready_to_encode = secondary_encoder_->HasFrameToEncode();

    if (!primary_ready_to_encode && !secondary_ready_to_encode) {
      // Nothing to send.
      return 0;
    }
    // Slot 2 of |fragmentation_| holds a previously buffered secondary
    // payload (see the early-return branch below); non-zero length means one
    // exists.
    int len_bytes_previous_secondary = static_cast<int>(
        fragmentation_.fragmentationLength[2]);
    assert(len_bytes_previous_secondary <= MAX_PAYLOAD_SIZE_BYTE);
    bool has_previous_payload = len_bytes_previous_secondary > 0;

    uint32_t primary_timestamp = primary_encoder->EarliestTimestamp();
    uint32_t secondary_timestamp = secondary_encoder_->EarliestTimestamp();

    if (!has_previous_payload && !primary_ready_to_encode &&
        secondary_ready_to_encode) {
      // Secondary payload will be the ONLY bit-stream. Encode by secondary
      // encoder, store the payload, and return. No packet is sent.
      int16_t len_bytes = MAX_PAYLOAD_SIZE_BYTE;
      WebRtcACMEncodingType encoding_type;
      if (secondary_encoder_->Encode(red_buffer_, &len_bytes,
                                     &last_red_timestamp_,
                                     &encoding_type) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "ProcessDual(): Encoding of secondary encoder Failed");
        return -1;
      }
      assert(len_bytes > 0);
      assert(encoding_type == kActiveNormalEncoded);
      assert(len_bytes <= MAX_PAYLOAD_SIZE_BYTE);
      fragmentation_.fragmentationLength[2] = len_bytes;
      return 0;
    }

    // Initialize with invalid but different values, so later can have sanity
    // check if they are different.
    int index_primary = -1;
    int index_secondary = -2;
    int index_previous_secondary = -3;

    // Fragment indices are assigned by age: a payload that is older (its
    // timestamp "less than" another's) gets a larger index, so slot 0 always
    // ends up holding the newest payload.
    if (primary_ready_to_encode) {
      index_primary = secondary_ready_to_encode ?
          TimestampLessThan(primary_timestamp, secondary_timestamp) : 0;
      index_primary += has_previous_payload ?
          TimestampLessThan(primary_timestamp, last_red_timestamp_) : 0;
    }

    if (secondary_ready_to_encode) {
      // Timestamp of secondary payload can only be less than primary payload,
      // but is always larger than the timestamp of previous secondary payload.
      index_secondary = primary_ready_to_encode ?
          (1 - TimestampLessThan(primary_timestamp, secondary_timestamp)) : 0;
    }

    if (has_previous_payload) {
      index_previous_secondary = primary_ready_to_encode ?
          (1 - TimestampLessThan(primary_timestamp, last_red_timestamp_)) : 0;
      // If secondary is ready it always have a timestamp larger than previous
      // secondary. So the index is either 0 or 1.
      index_previous_secondary += secondary_ready_to_encode ? 1 : 0;
    }

    // Indices must not be equal.
    assert(index_primary != index_secondary);
    assert(index_primary != index_previous_secondary);
    assert(index_secondary != index_previous_secondary);

    // One of the payloads has to be at position zero.
    assert(index_primary == 0 || index_secondary == 0 ||
           index_previous_secondary == 0);

    // Timestamp of the RED payload.
    // The RED packet carries the timestamp of whichever payload landed in
    // slot 0 (the newest one).
    if (index_primary == 0) {
      current_timestamp = primary_timestamp;
    } else if (index_secondary == 0) {
      current_timestamp = secondary_timestamp;
    } else {
      current_timestamp = last_red_timestamp_;
    }

    fragmentation_.fragmentationVectorSize = 0;
    if (has_previous_payload) {
      assert(index_previous_secondary >= 0 &&
             index_previous_secondary < kMaxNumFragmentationVectors);
      assert(len_bytes_previous_secondary <= MAX_PAYLOAD_SIZE_BYTE);
      // Copy the buffered secondary payload from |red_buffer_| into its slot
      // of the outgoing stream and fill in its fragmentation entry.
      memcpy(&stream[index_previous_secondary * MAX_PAYLOAD_SIZE_BYTE],
             red_buffer_, sizeof(stream[0]) * len_bytes_previous_secondary);
      fragmentation_.fragmentationLength[index_previous_secondary] =
          len_bytes_previous_secondary;
      fragmentation_.fragmentationPlType[index_previous_secondary] =
          secondary_send_codec_inst_.pltype;
      fragmentation_.fragmentationTimeDiff[index_previous_secondary] =
          static_cast<uint16_t>(current_timestamp - last_red_timestamp_);
      fragmentation_.fragmentationVectorSize++;
    }

    if (primary_ready_to_encode) {
      assert(index_primary >= 0 && index_primary < kMaxNumFragmentationVectors);
      int i = index_primary * MAX_PAYLOAD_SIZE_BYTE;
      if (EncodeFragmentation(index_primary, send_codec_inst_.pltype,
                              current_timestamp, primary_encoder,
                              &stream[i]) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "ProcessDualStream(): Encoding of primary encoder Failed");
        return -1;
      }
    }

    if (secondary_ready_to_encode) {
      assert(index_secondary >= 0 &&
             index_secondary < kMaxNumFragmentationVectors - 1);
      int i = index_secondary * MAX_PAYLOAD_SIZE_BYTE;
      if (EncodeFragmentation(index_secondary,
                              secondary_send_codec_inst_.pltype,
                              current_timestamp, secondary_encoder_.get(),
                              &stream[i]) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "ProcessDualStream(): Encoding of secondary encoder "
                     "Failed");
        return -1;
      }
    }
    // Copy to local variable, as it will be used outside the ACM lock.
    my_fragmentation.CopyFrom(fragmentation_);
    my_red_payload_type = red_pltype_;
    length_bytes = 0;
    for (int n = 0; n < fragmentation_.fragmentationVectorSize; n++) {
      length_bytes += fragmentation_.fragmentationLength[n];
    }
  }

  {
    CriticalSectionScoped lock(callback_crit_sect_);
    if (packetization_callback_ != NULL) {
      // Callback with payload data, including redundant data (RED).
      if (packetization_callback_->SendData(kAudioFrameSpeech,
                                            my_red_payload_type,
                                            current_timestamp, stream,
                                            length_bytes,
                                            &my_fragmentation) < 0) {
        return -1;
      }
    }
  }

  {
    CriticalSectionScoped lock(acm_crit_sect_);
    // Now that data is sent, clean up fragmentation.
    ResetFragmentation(0);
  }
  return 0;
}
485
// Process any pending tasks such as timeouts.
// Encodes one frame with the current send codec, optionally wraps it with
// RED redundancy, and delivers it to the registered packetization callback.
// Returns the number of bytes handed to the callback, or -1 on error.
int AudioCodingModuleImpl::ProcessSingleStream() {
  // Make room for 1 RED payload.
  uint8_t stream[2 * MAX_PAYLOAD_SIZE_BYTE];
  // TODO(turajs): |length_bytes| & |red_length_bytes| can be of type int if
  // ACMGenericCodec::Encode() & ACMGenericCodec::GetRedPayload() allows.
  int16_t length_bytes = 2 * MAX_PAYLOAD_SIZE_BYTE;
  int16_t red_length_bytes = length_bytes;
  uint32_t rtp_timestamp;
  int status;
  WebRtcACMEncodingType encoding_type;
  FrameType frame_type = kAudioFrameSpeech;
  uint8_t current_payload_type = 0;
  bool has_data_to_send = false;
  bool red_active = false;
  RTPFragmentationHeader my_fragmentation;

  // Keep the scope of the ACM critical section limited.
  {
    CriticalSectionScoped lock(acm_crit_sect_);
    // Check if there is an encoder before.
    if (!HaveValidEncoder("ProcessSingleStream")) {
      return -1;
    }
    status = codecs_[current_send_codec_idx_]->Encode(stream, &length_bytes,
                                                      &rtp_timestamp,
                                                      &encoding_type);
    if (status < 0) {
      // Encode failed.
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "ProcessSingleStream(): Encoding Failed");
      length_bytes = 0;
      return -1;
    } else if (status == 0) {
      // Not enough data.
      return 0;
    } else {
      // Map the encoding type to an RTP frame type and payload type. The
      // DTX cases also reset |is_first_red_| so a new RED sequence starts
      // after the comfort-noise period.
      switch (encoding_type) {
        case kNoEncoding: {
          current_payload_type = previous_pltype_;
          frame_type = kFrameEmpty;
          length_bytes = 0;
          break;
        }
        case kActiveNormalEncoded:
        case kPassiveNormalEncoded: {
          current_payload_type = static_cast<uint8_t>(send_codec_inst_.pltype);
          frame_type = kAudioFrameSpeech;
          break;
        }
        case kPassiveDTXNB: {
          current_payload_type = cng_nb_pltype_;
          frame_type = kAudioFrameCN;
          is_first_red_ = true;
          break;
        }
        case kPassiveDTXWB: {
          current_payload_type = cng_wb_pltype_;
          frame_type = kAudioFrameCN;
          is_first_red_ = true;
          break;
        }
        case kPassiveDTXSWB: {
          current_payload_type = cng_swb_pltype_;
          frame_type = kAudioFrameCN;
          is_first_red_ = true;
          break;
        }
        case kPassiveDTXFB: {
          current_payload_type = cng_fb_pltype_;
          frame_type = kAudioFrameCN;
          is_first_red_ = true;
          break;
        }
      }
      has_data_to_send = true;
      previous_pltype_ = current_payload_type;

      // Redundancy encode is done here. The two bitstreams packetized into
      // one RTP packet and the fragmentation points are set.
      // Only apply RED on speech data.
      if ((red_enabled_) &&
          ((encoding_type == kActiveNormalEncoded) ||
              (encoding_type == kPassiveNormalEncoded))) {
        // RED is enabled within this scope.
        //
        // Note that, a special solution exists for iSAC since it is the only
        // codec for which GetRedPayload has a non-empty implementation.
        //
        // Summary of the RED scheme below (use iSAC as example):
        //
        //  1st (is_first_red_ is true) encoded iSAC frame (primary #1) =>
        //      - call GetRedPayload() and store redundancy for packet #1 in
        //        second fragment of RED buffer (old data)
        //      - drop the primary iSAC frame
        //      - don't call SendData
        //  2nd (is_first_red_ is false) encoded iSAC frame (primary #2) =>
        //      - store primary #2 in 1st fragment of RED buffer and send the
        //        combined packet
        //      - the transmitted packet contains primary #2 (new) and
        //        redundancy for packet #1 (old)
        //      - call GetRed_Payload() and store redundancy for packet #2 in
        //        second fragment of RED buffer
        //
        //  ...
        //
        //  Nth encoded iSAC frame (primary #N) =>
        //      - store primary #N in 1st fragment of RED buffer and send the
        //        combined packet
        //      - the transmitted packet contains primary #N (new) and
        //        reduncancy for packet #(N-1) (old)
        //      - call GetRedPayload() and store redundancy for packet #N in
        //        second fragment of RED buffer
        //
        //  For all other codecs, GetRedPayload does nothing and returns -1 =>
        //  redundant data is only a copy.
        //
        //  First combined packet contains : #2 (new) and #1 (old)
        //  Second combined packet contains: #3 (new) and #2 (old)
        //  Third combined packet contains : #4 (new) and #3 (old)
        //
        //  Hence, even if every second packet is dropped, perfect
        //  reconstruction is possible.
        red_active = true;

        has_data_to_send = false;
        // Skip the following part for the first packet in a RED session.
        if (!is_first_red_) {
          // Rearrange stream such that RED packets are included.
          // Replace stream now that we have stored current stream.
          memcpy(stream + fragmentation_.fragmentationOffset[1], red_buffer_,
                 fragmentation_.fragmentationLength[1]);
          // Update the fragmentation time difference vector, in number of
          // timestamps.
          uint16_t time_since_last = static_cast<uint16_t>(
              rtp_timestamp - last_red_timestamp_);

          // Update fragmentation vectors.
          fragmentation_.fragmentationPlType[1] =
              fragmentation_.fragmentationPlType[0];
          fragmentation_.fragmentationTimeDiff[1] = time_since_last;
          has_data_to_send = true;
        }

        // Insert new packet length.
        fragmentation_.fragmentationLength[0] = length_bytes;

        // Insert new packet payload type.
        fragmentation_.fragmentationPlType[0] = current_payload_type;
        last_red_timestamp_ = rtp_timestamp;

        // Can be modified by the GetRedPayload() call if iSAC is utilized.
        red_length_bytes = length_bytes;

        // A fragmentation header is provided => packetization according to
        // RFC 2198 (RTP Payload for Redundant Audio Data) will be used.
        // First fragment is the current data (new).
        // Second fragment is the previous data (old).
        length_bytes = static_cast<int16_t>(
            fragmentation_.fragmentationLength[0] +
            fragmentation_.fragmentationLength[1]);

        // Get, and store, redundant data from the encoder based on the recently
        // encoded frame.
        // NOTE - only iSAC contains an implementation; all other codecs does
        // nothing and returns -1.
        if (codecs_[current_send_codec_idx_]->GetRedPayload(
            red_buffer_, &red_length_bytes) == -1) {
          // The codec was not iSAC => use current encoder output as redundant
          // data instead (trivial RED scheme).
          memcpy(red_buffer_, stream, red_length_bytes);
        }

        is_first_red_ = false;
        // Update payload type with RED payload type.
        current_payload_type = red_pltype_;
        // We have packed 2 payloads.
        fragmentation_.fragmentationVectorSize = kNumRedFragmentationVectors;

        // Copy to local variable, as it will be used outside ACM lock.
        my_fragmentation.CopyFrom(fragmentation_);
        // Store RED length.
        // Stored AFTER the CopyFrom above on purpose: this length describes
        // the redundant fragment of the NEXT packet, not the one being sent.
        fragmentation_.fragmentationLength[1] = red_length_bytes;
      }
    }
  }

  if (has_data_to_send) {
    CriticalSectionScoped lock(callback_crit_sect_);

    if (packetization_callback_ != NULL) {
      if (red_active) {
        // Callback with payload data, including redundant data (RED).
        packetization_callback_->SendData(frame_type, current_payload_type,
                                          rtp_timestamp, stream, length_bytes,
                                          &my_fragmentation);
      } else {
        // Callback with payload data.
        packetization_callback_->SendData(frame_type, current_payload_type,
                                          rtp_timestamp, stream, length_bytes,
                                          NULL);
      }
    }

    if (vad_callback_ != NULL) {
      // Callback with VAD decision.
      vad_callback_->InFrameType(static_cast<int16_t>(encoding_type));
    }
  }
  return length_bytes;
}
697
698/////////////////////////////////////////
699//   Sender
700//
701
702// Initialize send codec.
703int AudioCodingModuleImpl::InitializeSender() {
704  CriticalSectionScoped lock(acm_crit_sect_);
705
706  // Start with invalid values.
707  send_codec_registered_ = false;
708  current_send_codec_idx_ = -1;
709  send_codec_inst_.plname[0] = '\0';
710
711  // Delete all encoders to start fresh.
712  for (int id = 0; id < ACMCodecDB::kMaxNumCodecs; id++) {
713    if (codecs_[id] != NULL) {
714      codecs_[id]->DestructEncoder();
715    }
716  }
717
718  // Initialize RED.
719  is_first_red_ = true;
720  if (red_enabled_ || secondary_encoder_.get() != NULL) {
721    if (red_buffer_ != NULL) {
722      memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);
723    }
724    if (red_enabled_) {
725      ResetFragmentation(kNumRedFragmentationVectors);
726    } else {
727      ResetFragmentation(0);
728    }
729  }
730
731  return 0;
732}
733
734int AudioCodingModuleImpl::ResetEncoder() {
735  CriticalSectionScoped lock(acm_crit_sect_);
736  if (!HaveValidEncoder("ResetEncoder")) {
737    return -1;
738  }
739  return codecs_[current_send_codec_idx_]->ResetEncoder();
740}
741
742ACMGenericCodec* AudioCodingModuleImpl::CreateCodec(const CodecInst& codec) {
743  ACMGenericCodec* my_codec = NULL;
744
745  my_codec = ACMCodecDB::CreateCodecInstance(codec);
746  if (my_codec == NULL) {
747    // Error, could not create the codec.
748    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
749                 "ACMCodecDB::CreateCodecInstance() failed in CreateCodec()");
750    return my_codec;
751  }
752  my_codec->SetUniqueID(id_);
753
754  return my_codec;
755}
756
757// Check if the given codec is a valid to be registered as send codec.
758static int IsValidSendCodec(const CodecInst& send_codec,
759                            bool is_primary_encoder,
760                            int acm_id,
761                            int* mirror_id) {
762  if ((send_codec.channels != 1) && (send_codec.channels != 2)) {
763    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
764                 "Wrong number of channels (%d, only mono and stereo are "
765                 "supported) for %s encoder", send_codec.channels,
766                 is_primary_encoder ? "primary" : "secondary");
767    return -1;
768  }
769
770  int codec_id = ACMCodecDB::CodecNumber(send_codec, mirror_id);
771  if (codec_id < 0) {
772    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
773                 "Invalid codec setting for the send codec.");
774    return -1;
775  }
776
777  // TODO(tlegrand): Remove this check. Already taken care of in
778  // ACMCodecDB::CodecNumber().
779  // Check if the payload-type is valid
780  if (!ACMCodecDB::ValidPayloadType(send_codec.pltype)) {
781    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
782                 "Invalid payload-type %d for %s.", send_codec.pltype,
783                 send_codec.plname);
784    return -1;
785  }
786
787  // Telephone-event cannot be a send codec.
788  if (!STR_CASE_CMP(send_codec.plname, "telephone-event")) {
789    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
790                 "telephone-event cannot be a send codec");
791    *mirror_id = -1;
792    return -1;
793  }
794
795  if (ACMCodecDB::codec_settings_[codec_id].channel_support
796      < send_codec.channels) {
797    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
798                 "%d number of channels not supportedn for %s.",
799                 send_codec.channels, send_codec.plname);
800    *mirror_id = -1;
801    return -1;
802  }
803
804  if (!is_primary_encoder) {
805    // If registering the secondary encoder, then RED and CN are not valid
806    // choices as encoder.
807    if (IsCodecRED(&send_codec)) {
808      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
809                   "RED cannot be secondary codec");
810      *mirror_id = -1;
811      return -1;
812    }
813
814    if (IsCodecCN(&send_codec)) {
815      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
816                   "DTX cannot be secondary codec");
817      *mirror_id = -1;
818      return -1;
819    }
820  }
821  return codec_id;
822}
823
824int AudioCodingModuleImpl::RegisterSecondarySendCodec(
825    const CodecInst& send_codec) {
826  CriticalSectionScoped lock(acm_crit_sect_);
827  if (!send_codec_registered_) {
828    return -1;
829  }
830  // Primary and Secondary codecs should have the same sampling rates.
831  if (send_codec.plfreq != send_codec_inst_.plfreq) {
832    return -1;
833  }
834  int mirror_id;
835  int codec_id = IsValidSendCodec(send_codec, false, id_, &mirror_id);
836  if (codec_id < 0) {
837    return -1;
838  }
839  ACMGenericCodec* encoder = CreateCodec(send_codec);
840  WebRtcACMCodecParams codec_params;
841  // Initialize the codec before registering. For secondary codec VAD & DTX are
842  // disabled.
843  memcpy(&(codec_params.codec_inst), &send_codec, sizeof(CodecInst));
844  codec_params.enable_vad = false;
845  codec_params.enable_dtx = false;
846  codec_params.vad_mode = VADNormal;
847  // Force initialization.
848  if (encoder->InitEncoder(&codec_params, true) < 0) {
849    // Could not initialize, therefore cannot be registered.
850    delete encoder;
851    return -1;
852  }
853  secondary_encoder_.reset(encoder);
854  memcpy(&secondary_send_codec_inst_, &send_codec, sizeof(send_codec));
855
856  // Disable VAD & DTX.
857  SetVADSafe(false, false, VADNormal);
858
859  // Cleaning.
860  if (red_buffer_) {
861    memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);
862  }
863  ResetFragmentation(0);
864  return 0;
865}
866
867void AudioCodingModuleImpl::UnregisterSecondarySendCodec() {
868  CriticalSectionScoped lock(acm_crit_sect_);
869  if (secondary_encoder_.get() == NULL) {
870    return;
871  }
872  secondary_encoder_.reset();
873  ResetFragmentation(0);
874}
875
876int AudioCodingModuleImpl::SecondarySendCodec(
877    CodecInst* secondary_codec) const {
878  CriticalSectionScoped lock(acm_crit_sect_);
879  if (secondary_encoder_.get() == NULL) {
880    return -1;
881  }
882  memcpy(secondary_codec, &secondary_send_codec_inst_,
883         sizeof(secondary_send_codec_inst_));
884  return 0;
885}
886
887// Can be called multiple times for Codec, CNG, RED.
// Registers |send_codec| as the primary send codec. For RED and CNG only the
// payload type is recorded; for regular codecs the encoder is created (or
// re-configured) as needed. Returns 0 on success, -1 on failure.
int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
  int mirror_id;
  // Validation happens before the lock is taken; IsValidSendCodec() only
  // consults the static codec database.
  int codec_id = IsValidSendCodec(send_codec, true, id_, &mirror_id);

  CriticalSectionScoped lock(acm_crit_sect_);

  // Check for reported errors from function IsValidSendCodec().
  if (codec_id < 0) {
    if (!send_codec_registered_) {
      // This index has to be -1 if there is no codec registered.
      current_send_codec_idx_ = -1;
    }
    return -1;
  }

  // RED can be registered with other payload type. If not registered a default
  // payload type is used.
  if (IsCodecRED(&send_codec)) {
    // TODO(tlegrand): Remove this check. Already taken care of in
    // ACMCodecDB::CodecNumber().
    // Check if the payload-type is valid
    if (!ACMCodecDB::ValidPayloadType(send_codec.pltype)) {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "Invalid payload-type %d for %s.", send_codec.pltype,
                   send_codec.plname);
      return -1;
    }
    // Set RED payload type.
    red_pltype_ = static_cast<uint8_t>(send_codec.pltype);
    return 0;
  }

  // CNG can be registered with other payload type. If not registered the
  // default payload types from codec database will be used.
  if (IsCodecCN(&send_codec)) {
    // CNG is registered; store the payload type for the matching band.
    switch (send_codec.plfreq) {
      case 8000: {
        cng_nb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
        break;
      }
      case 16000: {
        cng_wb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
        break;
      }
      case 32000: {
        cng_swb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
        break;
      }
      case 48000: {
        cng_fb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
        break;
      }
      default: {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "RegisterSendCodec() failed, invalid frequency for CNG "
                     "registration");
        return -1;
      }
    }
    return 0;
  }

  // Set Stereo, and make sure VAD and DTX is turned off.
  if (send_codec.channels == 2) {
    stereo_send_ = true;
    if (vad_enabled_ || dtx_enabled_) {
      WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
                   "VAD/DTX is turned off, not supported when sending stereo.");
    }
    vad_enabled_ = false;
    dtx_enabled_ = false;
  } else {
    stereo_send_ = false;
  }

  // Check if the codec is already registered as send codec.
  bool is_send_codec;
  if (send_codec_registered_) {
    int send_codec_mirror_id;
    int send_codec_id = ACMCodecDB::CodecNumber(send_codec_inst_,
                                                &send_codec_mirror_id);
    assert(send_codec_id >= 0);
    is_send_codec = (send_codec_id == codec_id) ||
        (mirror_id == send_codec_mirror_id);
  } else {
    is_send_codec = false;
  }

  // If there is secondary codec registered and the new send codec has a
  // sampling rate different than that of secondary codec, then unregister the
  // secondary codec.
  if (secondary_encoder_.get() != NULL &&
      secondary_send_codec_inst_.plfreq != send_codec.plfreq) {
    secondary_encoder_.reset();
    ResetFragmentation(0);
  }

  // If new codec, or new settings, register.
  if (!is_send_codec) {
    // |mirror_id| indexes the entry that owns the actual encoder instance
    // (related database entries can share one; see the iSAC note below).
    if (codecs_[mirror_id] == NULL) {
      codecs_[mirror_id] = CreateCodec(send_codec);
      if (codecs_[mirror_id] == NULL) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Cannot Create the codec");
        return -1;
      }
      mirror_codec_idx_[mirror_id] = mirror_id;
    }

    if (mirror_id != codec_id) {
      codecs_[codec_id] = codecs_[mirror_id];
      mirror_codec_idx_[codec_id] = mirror_id;
    }

    ACMGenericCodec* codec_ptr = codecs_[codec_id];
    WebRtcACMCodecParams codec_params;

    memcpy(&(codec_params.codec_inst), &send_codec, sizeof(CodecInst));
    codec_params.enable_vad = vad_enabled_;
    codec_params.enable_dtx = dtx_enabled_;
    codec_params.vad_mode = vad_mode_;
    // Force initialization.
    if (codec_ptr->InitEncoder(&codec_params, true) < 0) {
      // Could not initialize the encoder.

      // Check if already have a registered codec.
      // Depending on that different messages are logged.
      if (!send_codec_registered_) {
        current_send_codec_idx_ = -1;
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Cannot Initialize the encoder No Encoder is registered");
      } else {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Cannot Initialize the encoder, continue encoding with "
                     "the previously registered codec");
      }
      return -1;
    }

    // Update states; InitEncoder() may have adjusted the VAD/DTX settings.
    dtx_enabled_ = codec_params.enable_dtx;
    vad_enabled_ = codec_params.enable_vad;
    vad_mode_ = codec_params.vad_mode;

    // Everything is fine so we can replace the previous codec with this one.
    if (send_codec_registered_) {
      // If we change codec we start fresh with RED.
      // This is not strictly required by the standard.
      is_first_red_ = true;
      codec_ptr->SetVAD(&dtx_enabled_, &vad_enabled_, &vad_mode_);

      if (!codec_ptr->HasInternalFEC()) {
        codec_fec_enabled_ = false;
      } else {
        if (codec_ptr->SetFEC(codec_fec_enabled_) < 0) {
          WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                       "Cannot set codec FEC");
          return -1;
        }
      }
    }

    current_send_codec_idx_ = codec_id;
    send_codec_registered_ = true;
    memcpy(&send_codec_inst_, &send_codec, sizeof(CodecInst));
    previous_pltype_ = send_codec_inst_.pltype;
    return 0;
  } else {
    // If codec is the same as already registered check if any parameters
    // has changed compared to the current values.
    // If any parameter is valid then apply it and record.
    bool force_init = false;

    if (mirror_id != codec_id) {
      codecs_[codec_id] = codecs_[mirror_id];
      mirror_codec_idx_[codec_id] = mirror_id;
    }

    // Check the payload type.
    if (send_codec.pltype != send_codec_inst_.pltype) {
      // At this point check if the given payload type is valid.
      // Record it later when the sampling frequency is changed
      // successfully.
      if (!ACMCodecDB::ValidPayloadType(send_codec.pltype)) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Out of range payload type");
        return -1;
      }
    }

    // If there is a codec that ONE instance of codec supports multiple
    // sampling frequencies, then we need to take care of it here.
    // one such a codec is iSAC. Both WB and SWB are encoded and decoded
    // with one iSAC instance. Therefore, we need to update the encoder
    // frequency if required.
    if (send_codec_inst_.plfreq != send_codec.plfreq) {
      force_init = true;

      // If sampling frequency is changed we have to start fresh with RED.
      is_first_red_ = true;
    }

    // If packet size or number of channels has changed, we need to
    // re-initialize the encoder.
    if (send_codec_inst_.pacsize != send_codec.pacsize) {
      force_init = true;
    }
    if (send_codec_inst_.channels != send_codec.channels) {
      force_init = true;
    }

    if (force_init) {
      WebRtcACMCodecParams codec_params;

      memcpy(&(codec_params.codec_inst), &send_codec, sizeof(CodecInst));
      codec_params.enable_vad = vad_enabled_;
      codec_params.enable_dtx = dtx_enabled_;
      codec_params.vad_mode = vad_mode_;

      // Force initialization.
      if (codecs_[current_send_codec_idx_]->InitEncoder(&codec_params,
                                                        true) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Could not change the codec packet-size.");
        return -1;
      }

      send_codec_inst_.plfreq = send_codec.plfreq;
      send_codec_inst_.pacsize = send_codec.pacsize;
      send_codec_inst_.channels = send_codec.channels;
    }

    // If the change of sampling frequency has been successful then
    // we store the payload-type.
    send_codec_inst_.pltype = send_codec.pltype;

    // Check if a change in Rate is required.
    if (send_codec.rate != send_codec_inst_.rate) {
      if (codecs_[codec_id]->SetBitRate(send_codec.rate) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Could not change the codec rate.");
        return -1;
      }
      send_codec_inst_.rate = send_codec.rate;
    }

    if (!codecs_[codec_id]->HasInternalFEC()) {
      codec_fec_enabled_ = false;
    } else {
      if (codecs_[codec_id]->SetFEC(codec_fec_enabled_) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Cannot set codec FEC");
        return -1;
      }
    }

    previous_pltype_ = send_codec_inst_.pltype;
    return 0;
  }
}
1149
1150// Get current send codec.
1151int AudioCodingModuleImpl::SendCodec(
1152    CodecInst* current_codec) const {
1153  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
1154               "SendCodec()");
1155  CriticalSectionScoped lock(acm_crit_sect_);
1156
1157  if (!send_codec_registered_) {
1158    WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
1159                 "SendCodec Failed, no codec is registered");
1160    return -1;
1161  }
1162  WebRtcACMCodecParams encoder_param;
1163  codecs_[current_send_codec_idx_]->EncoderParams(&encoder_param);
1164  encoder_param.codec_inst.pltype = send_codec_inst_.pltype;
1165  memcpy(current_codec, &(encoder_param.codec_inst), sizeof(CodecInst));
1166
1167  return 0;
1168}
1169
1170// Get current send frequency.
1171int AudioCodingModuleImpl::SendFrequency() const {
1172  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
1173               "SendFrequency()");
1174  CriticalSectionScoped lock(acm_crit_sect_);
1175
1176  if (!send_codec_registered_) {
1177    WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
1178                 "SendFrequency Failed, no codec is registered");
1179    return -1;
1180  }
1181
1182  return send_codec_inst_.plfreq;
1183}
1184
1185// Get encode bitrate.
// Adaptive rate codecs return their current encode target rate, while other
// codecs return their long-term average or their fixed rate.
1188int AudioCodingModuleImpl::SendBitrate() const {
1189  CriticalSectionScoped lock(acm_crit_sect_);
1190
1191  if (!send_codec_registered_) {
1192    WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
1193                 "SendBitrate Failed, no codec is registered");
1194    return -1;
1195  }
1196
1197  WebRtcACMCodecParams encoder_param;
1198  codecs_[current_send_codec_idx_]->EncoderParams(&encoder_param);
1199
1200  return encoder_param.codec_inst.rate;
1201}
1202
1203// Set available bandwidth, inform the encoder about the estimated bandwidth
1204// received from the remote party.
1205int AudioCodingModuleImpl::SetReceivedEstimatedBandwidth(int bw) {
1206  CriticalSectionScoped lock(acm_crit_sect_);
1207  return codecs_[current_send_codec_idx_]->SetEstimatedBandwidth(bw);
1208}
1209
1210// Register a transport callback which will be called to deliver
1211// the encoded buffers.
1212int AudioCodingModuleImpl::RegisterTransportCallback(
1213    AudioPacketizationCallback* transport) {
1214  CriticalSectionScoped lock(callback_crit_sect_);
1215  packetization_callback_ = transport;
1216  return 0;
1217}
1218
1219// Add 10MS of raw (PCM) audio data to the encoder.
// Validates |audio_frame|, pre-processes it (resample and/or re-mix as
// needed), and feeds it to the primary — and, when dual-streaming, the
// secondary — encoder. Returns 0 on success, -1 on failure.
int AudioCodingModuleImpl::Add10MsData(
    const AudioFrame& audio_frame) {
  if (audio_frame.samples_per_channel_ <= 0) {
    assert(false);
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, payload length is negative or "
                 "zero");
    return -1;
  }

  // 48 kHz is the highest accepted input rate.
  if (audio_frame.sample_rate_hz_ > 48000) {
    assert(false);
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, input frequency not valid");
    return -1;
  }

  // If the length and frequency matches. We currently just support raw PCM.
  // (A 10 ms frame must carry exactly sample_rate / 100 samples per channel.)
  if ((audio_frame.sample_rate_hz_ / 100)
      != audio_frame.samples_per_channel_) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, input frequency and length doesn't"
                 " match");
    return -1;
  }

  if (audio_frame.num_channels_ != 1 && audio_frame.num_channels_ != 2) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, invalid number of channels.");
    return -1;
  }

  CriticalSectionScoped lock(acm_crit_sect_);
  // Do we have a codec registered?
  if (!HaveValidEncoder("Add10MsData")) {
    return -1;
  }

  const AudioFrame* ptr_frame;
  // Perform a resampling, also down-mix if it is required and can be
  // performed before resampling (a down mix prior to resampling will take
  // place if both primary and secondary encoders are mono and input is in
  // stereo).
  if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
    return -1;
  }

  // Check whether we need an up-mix or down-mix?
  bool remix = ptr_frame->num_channels_ != send_codec_inst_.channels;
  if (secondary_encoder_.get() != NULL) {
    remix = remix ||
        (ptr_frame->num_channels_ != secondary_send_codec_inst_.channels);
  }

  // If a re-mix is required (up or down), this buffer will store re-mixed
  // version of the input.
  int16_t buffer[WEBRTC_10MS_PCM_AUDIO];
  if (remix) {
    if (ptr_frame->num_channels_ == 1) {
      if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, buffer) < 0)
        return -1;
    } else {
      if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, buffer) < 0)
        return -1;
    }
  }

  // When adding data to encoders this pointer is pointing to an audio buffer
  // with correct number of channels.
  const int16_t* ptr_audio = ptr_frame->data_;

  // For pushing data to primary, point the |ptr_audio| to correct buffer.
  if (send_codec_inst_.channels != ptr_frame->num_channels_)
    ptr_audio = buffer;

  if (codecs_[current_send_codec_idx_]->Add10MsData(
      ptr_frame->timestamp_, ptr_audio, ptr_frame->samples_per_channel_,
      send_codec_inst_.channels) < 0)
    return -1;

  if (secondary_encoder_.get() != NULL) {
    // For pushing data to secondary, point the |ptr_audio| to correct buffer.
    ptr_audio = ptr_frame->data_;
    if (secondary_send_codec_inst_.channels != ptr_frame->num_channels_)
      ptr_audio = buffer;

    if (secondary_encoder_->Add10MsData(
        ptr_frame->timestamp_, ptr_audio, ptr_frame->samples_per_channel_,
        secondary_send_codec_inst_.channels) < 0)
      return -1;
  }

  return 0;
}
1314
1315// Perform a resampling and down-mix if required. We down-mix only if
1316// encoder is mono and input is stereo. In case of dual-streaming, both
1317// encoders has to be mono for down-mix to take place.
1318// |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
1319// is required, |*ptr_out| points to |in_frame|.
int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
                                               const AudioFrame** ptr_out) {
  // Primary and secondary (if exists) should have the same sampling rate.
  assert((secondary_encoder_.get() != NULL) ?
      secondary_send_codec_inst_.plfreq == send_codec_inst_.plfreq : true);

  bool resample = (in_frame.sample_rate_hz_ != send_codec_inst_.plfreq);

  // This variable is true if primary codec and secondary codec (if exists)
  // are both mono and input is stereo.
  bool down_mix;
  if (secondary_encoder_.get() != NULL) {
    down_mix = (in_frame.num_channels_ == 2) &&
        (send_codec_inst_.channels == 1) &&
        (secondary_send_codec_inst_.channels == 1);
  } else {
    down_mix = (in_frame.num_channels_ == 2) &&
        (send_codec_inst_.channels == 1);
  }

  // Track input timestamps; on a discontinuity, advance the codec timestamp
  // by the gap scaled with the codec-rate / input-rate ratio.
  if (!first_10ms_data_) {
    expected_in_ts_ = in_frame.timestamp_;
    expected_codec_ts_ = in_frame.timestamp_;
    first_10ms_data_ = true;
  } else if (in_frame.timestamp_ != expected_in_ts_) {
    // TODO(turajs): Do we need a warning here?
    expected_codec_ts_ += (in_frame.timestamp_ - expected_in_ts_) *
        static_cast<uint32_t>((static_cast<double>(send_codec_inst_.plfreq) /
                    static_cast<double>(in_frame.sample_rate_hz_)));
    expected_in_ts_ = in_frame.timestamp_;
  }


  if (!down_mix && !resample) {
    // No pre-processing is required.
    expected_in_ts_ += in_frame.samples_per_channel_;
    expected_codec_ts_ += in_frame.samples_per_channel_;
    *ptr_out = &in_frame;
    return 0;
  }

  *ptr_out = &preprocess_frame_;
  preprocess_frame_.num_channels_ = in_frame.num_channels_;
  int16_t audio[WEBRTC_10MS_PCM_AUDIO];
  const int16_t* src_ptr_audio = in_frame.data_;
  int16_t* dest_ptr_audio = preprocess_frame_.data_;
  if (down_mix) {
    // If a resampling is required the output of a down-mix is written into a
    // local buffer, otherwise, it will be written to the output frame.
    if (resample)
      dest_ptr_audio = audio;
    if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0)
      return -1;
    preprocess_frame_.num_channels_ = 1;
    // Set the input of the resampler to the down-mixed signal.
    src_ptr_audio = audio;
  }

  preprocess_frame_.timestamp_ = expected_codec_ts_;
  preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
  preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_;
  // If it is required, we have to do a resampling.
  if (resample) {
    // The result of the resampler is written to output frame.
    dest_ptr_audio = preprocess_frame_.data_;

    preprocess_frame_.samples_per_channel_ =
        resampler_.Resample10Msec(src_ptr_audio,
                                  in_frame.sample_rate_hz_,
                                  send_codec_inst_.plfreq,
                                  preprocess_frame_.num_channels_,
                                  AudioFrame::kMaxDataSizeSamples,
                                  dest_ptr_audio);

    if (preprocess_frame_.samples_per_channel_ < 0) {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "Cannot add 10 ms audio, resampling failed");
      return -1;
    }
    preprocess_frame_.sample_rate_hz_ = send_codec_inst_.plfreq;
  }

  // Advance the expected timestamps in their respective clock domains.
  expected_codec_ts_ += preprocess_frame_.samples_per_channel_;
  expected_in_ts_ += in_frame.samples_per_channel_;

  return 0;
}
1407
1408/////////////////////////////////////////
1409//   (RED) Redundant Coding
1410//
1411
1412bool AudioCodingModuleImpl::REDStatus() const {
1413  CriticalSectionScoped lock(acm_crit_sect_);
1414
1415  return red_enabled_;
1416}
1417
1418// Configure RED status i.e on/off.
// Enables/disables RED. Note that the parameter list is split across the
// preprocessor branches below: the argument name is only bound when
// WEBRTC_CODEC_RED is compiled in.
int AudioCodingModuleImpl::SetREDStatus(
#ifdef WEBRTC_CODEC_RED
    bool enable_red) {
  CriticalSectionScoped lock(acm_crit_sect_);

  // RED and codec-internal FEC are mutually exclusive.
  if (enable_red == true && codec_fec_enabled_ == true) {
    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
                 "Codec internal FEC and RED cannot be co-enabled.");
    return -1;
  }

  if (red_enabled_ != enable_red) {
    // Reset the RED buffer.
    memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);

    // Reset fragmentation buffers.
    ResetFragmentation(kNumRedFragmentationVectors);
    // Set red_enabled_.
    red_enabled_ = enable_red;
  }
  is_first_red_ = true;  // Make sure we restart RED.
  return 0;
#else
    bool /* enable_red */) {
  // Without WEBRTC_CODEC_RED compiled in, RED can never be enabled.
  red_enabled_ = false;
  WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
               "  WEBRTC_CODEC_RED is undefined => red_enabled_ = %d",
               red_enabled_);
  return -1;
#endif
}
1450
1451/////////////////////////////////////////
1452//   (FEC) Forward Error Correction (codec internal)
1453//
1454
1455bool AudioCodingModuleImpl::CodecFEC() const {
1456  CriticalSectionScoped lock(acm_crit_sect_);
1457  return codec_fec_enabled_;
1458}
1459
1460int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
1461  CriticalSectionScoped lock(acm_crit_sect_);
1462
1463  if (enable_codec_fec == true && red_enabled_ == true) {
1464    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
1465                 "Codec internal FEC and RED cannot be co-enabled.");
1466    return -1;
1467  }
1468
1469  // Set codec FEC.
1470  if (HaveValidEncoder("SetCodecFEC") &&
1471      codecs_[current_send_codec_idx_]->SetFEC(enable_codec_fec) < 0) {
1472      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1473                   "Set codec internal FEC failed.");
1474    return -1;
1475  }
1476  codec_fec_enabled_ = enable_codec_fec;
1477  return 0;
1478}
1479
1480int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
1481  CriticalSectionScoped lock(acm_crit_sect_);
1482  if (HaveValidEncoder("SetPacketLossRate") &&
1483      codecs_[current_send_codec_idx_]->SetPacketLossRate(loss_rate) < 0) {
1484      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1485                   "Set packet loss rate failed.");
1486    return -1;
1487  }
1488  return 0;
1489}
1490
1491/////////////////////////////////////////
1492//   (VAD) Voice Activity Detection
1493//
1494int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
1495                                  bool enable_vad,
1496                                  ACMVADMode mode) {
1497  CriticalSectionScoped lock(acm_crit_sect_);
1498  return SetVADSafe(enable_dtx, enable_vad, mode);
1499}
1500
// Applies VAD/DTX settings; the caller must hold |acm_crit_sect_|. On
// unsupported configurations (stereo send, dual-streaming) the stored flags
// are forced off before returning -1.
int AudioCodingModuleImpl::SetVADSafe(bool enable_dtx,
                                      bool enable_vad,
                                      ACMVADMode mode) {
  // Sanity check of the mode.
  if ((mode != VADNormal) && (mode != VADLowBitrate)
      && (mode != VADAggr) && (mode != VADVeryAggr)) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Invalid VAD Mode %d, no change is made to VAD/DTX status",
                 mode);
    return -1;
  }

  // Check that the send codec is mono. We don't support VAD/DTX for stereo
  // sending.
  if ((enable_dtx || enable_vad) && stereo_send_) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "VAD/DTX not supported for stereo sending");
    dtx_enabled_ = false;
    vad_enabled_ = false;
    vad_mode_ = mode;
    return -1;
  }

  // We don't support VAD/DTX when dual-streaming is enabled, i.e.
  // secondary-encoder is registered.
  if ((enable_dtx || enable_vad) && secondary_encoder_.get() != NULL) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "VAD/DTX not supported when dual-streaming is enabled.");
    dtx_enabled_ = false;
    vad_enabled_ = false;
    vad_mode_ = mode;
    return -1;
  }

  // Store VAD/DTX settings. Values can be changed in the call to "SetVAD"
  // below.
  dtx_enabled_ = enable_dtx;
  vad_enabled_ = enable_vad;
  vad_mode_ = mode;

  // If a send codec is registered, set VAD/DTX for the codec. The codec may
  // adjust the flags in place through the pointers passed to SetVAD().
  if (HaveValidEncoder("SetVAD") && codecs_[current_send_codec_idx_]->SetVAD(
      &dtx_enabled_, &vad_enabled_,  &vad_mode_) < 0) {
      // SetVAD failed.
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "SetVAD failed");
      vad_enabled_ = false;
      dtx_enabled_ = false;
      return -1;
  }
  return 0;
}
1553
1554// Get VAD/DTX settings.
1555int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
1556                               ACMVADMode* mode) const {
1557  CriticalSectionScoped lock(acm_crit_sect_);
1558
1559  *dtx_enabled = dtx_enabled_;
1560  *vad_enabled = vad_enabled_;
1561  *mode = vad_mode_;
1562
1563  return 0;
1564}
1565
1566/////////////////////////////////////////
1567//   Receiver
1568//
1569
1570int AudioCodingModuleImpl::InitializeReceiver() {
1571  CriticalSectionScoped lock(acm_crit_sect_);
1572  return InitializeReceiverSafe();
1573}
1574
1575// Initialize receiver, resets codec database etc.
1576int AudioCodingModuleImpl::InitializeReceiverSafe() {
1577  // If the receiver is already initialized then we want to destroy any
1578  // existing decoders. After a call to this function, we should have a clean
1579  // start-up.
1580  if (receiver_initialized_) {
1581    if (receiver_.RemoveAllCodecs() < 0)
1582      return -1;
1583  }
1584  receiver_.set_id(id_);
1585  receiver_.ResetInitialDelay();
1586  receiver_.SetMinimumDelay(0);
1587  receiver_.SetMaximumDelay(0);
1588  receiver_.FlushBuffers();
1589
1590  // Register RED and CN.
1591  for (int i = 0; i < ACMCodecDB::kNumCodecs; i++) {
1592    if (IsCodecRED(i) || IsCodecCN(i)) {
1593      uint8_t pl_type = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
1594      if (receiver_.AddCodec(i, pl_type, 1, NULL) < 0) {
1595        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1596                     "Cannot register master codec.");
1597        return -1;
1598      }
1599    }
1600  }
1601  receiver_initialized_ = true;
1602  return 0;
1603}
1604
// TODO(turajs): If NetEq opens an API for resetting the state of decoders then
// implement this method. Otherwise it should be removed. It might be that by
// removing and registering a decoder we can achieve the effect of resetting.
1608// Reset the decoder state.
int AudioCodingModuleImpl::ResetDecoder() {
  // Intentional no-op; see the TODO above — NetEq currently exposes no API
  // for resetting decoder state.
  return 0;
}
1612
1613// Get current receive frequency.
1614int AudioCodingModuleImpl::ReceiveFrequency() const {
1615  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
1616               "ReceiveFrequency()");
1617
1618  CriticalSectionScoped lock(acm_crit_sect_);
1619
1620  int codec_id = receiver_.last_audio_codec_id();
1621
1622  return codec_id < 0 ? receiver_.current_sample_rate_hz() :
1623                        ACMCodecDB::database_[codec_id].plfreq;
1624}
1625
1626// Get current playout frequency.
1627int AudioCodingModuleImpl::PlayoutFrequency() const {
1628  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
1629               "PlayoutFrequency()");
1630
1631  CriticalSectionScoped lock(acm_crit_sect_);
1632
1633  return receiver_.current_sample_rate_hz();
1634}
1635
1636// Register possible receive codecs, can be called multiple times,
1637// for codecs, CNG (NB, WB and SWB), DTMF, RED.
1638int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
1639  CriticalSectionScoped lock(acm_crit_sect_);
1640
1641  if (codec.channels > 2 || codec.channels < 0) {
1642    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1643                 "Unsupported number of channels, %d.", codec.channels);
1644    return -1;
1645  }
1646
1647  // TODO(turajs) do we need this for NetEq 4?
1648  if (!receiver_initialized_) {
1649    if (InitializeReceiverSafe() < 0) {
1650      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1651                   "Cannot initialize receiver, failed registering codec.");
1652      return -1;
1653    }
1654  }
1655
1656  int mirror_id;
1657  int codec_id = ACMCodecDB::ReceiverCodecNumber(codec, &mirror_id);
1658
1659  if (codec_id < 0 || codec_id >= ACMCodecDB::kNumCodecs) {
1660    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1661                 "Wrong codec params to be registered as receive codec");
1662    return -1;
1663  }
1664
1665  // Check if the payload-type is valid.
1666  if (!ACMCodecDB::ValidPayloadType(codec.pltype)) {
1667    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1668                 "Invalid payload-type %d for %s.", codec.pltype,
1669                 codec.plname);
1670    return -1;
1671  }
1672
1673  AudioDecoder* decoder = NULL;
1674  // Get |decoder| associated with |codec|. |decoder| can be NULL if |codec|
1675  // does not own its decoder.
1676  if (GetAudioDecoder(codec, codec_id, mirror_id, &decoder) < 0) {
1677    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1678                 "Wrong codec params to be registered as receive codec");
1679    return -1;
1680  }
1681  uint8_t payload_type = static_cast<uint8_t>(codec.pltype);
1682  return receiver_.AddCodec(codec_id, payload_type, codec.channels, decoder);
1683}
1684
// Get current received codec.
// Fills |current_codec| with the last audio codec the receiver decoded and
// returns the receiver's status code.
int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const {
  return receiver_.LastAudioCodec(current_codec);
}
1689
1690// Incoming packet from network parsed and ready for decode.
1691int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
1692                                          const int payload_length,
1693                                          const WebRtcRTPHeader& rtp_header) {
1694  if (payload_length < 0) {
1695    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1696                 "IncomingPacket() Error, payload-length cannot be negative");
1697    return -1;
1698  }
1699  int last_audio_pltype = receiver_.last_audio_payload_type();
1700  if (receiver_.InsertPacket(rtp_header, incoming_payload, payload_length) <
1701      0) {
1702    return -1;
1703  }
1704  if (receiver_.last_audio_payload_type() != last_audio_pltype) {
1705    int index = receiver_.last_audio_codec_id();
1706    assert(index >= 0);
1707    CriticalSectionScoped lock(acm_crit_sect_);
1708
1709    // |codec_[index]| might not be even created, simply because it is not
1710    // yet registered as send codec. Even if it is registered, unless the
1711    // codec shares same instance for encoder and decoder, this call is
1712    // useless.
1713    if (codecs_[index] != NULL)
1714      codecs_[index]->UpdateDecoderSampFreq(index);
1715  }
1716  return 0;
1717}
1718
1719// Minimum playout delay (Used for lip-sync).
1720int AudioCodingModuleImpl::SetMinimumPlayoutDelay(int time_ms) {
1721  if ((time_ms < 0) || (time_ms > 10000)) {
1722    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1723                 "Delay must be in the range of 0-1000 milliseconds.");
1724    return -1;
1725  }
1726  return receiver_.SetMinimumDelay(time_ms);
1727}
1728
1729int AudioCodingModuleImpl::SetMaximumPlayoutDelay(int time_ms) {
1730  if ((time_ms < 0) || (time_ms > 10000)) {
1731    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1732                 "Delay must be in the range of 0-1000 milliseconds.");
1733    return -1;
1734  }
1735  return receiver_.SetMaximumDelay(time_ms);
1736}
1737
1738// Estimate the Bandwidth based on the incoming stream, needed for one way
1739// audio where the RTCP send the BW estimate.
1740// This is also done in the RTP module.
1741int AudioCodingModuleImpl::DecoderEstimatedBandwidth() const {
1742  // We can estimate far-end to near-end bandwidth if the iSAC are sent. Check
1743  // if the last received packets were iSAC packet then retrieve the bandwidth.
1744  int last_audio_codec_id = receiver_.last_audio_codec_id();
1745  if (last_audio_codec_id >= 0 &&
1746      STR_CASE_CMP("ISAC", ACMCodecDB::database_[last_audio_codec_id].plname)) {
1747    CriticalSectionScoped lock(acm_crit_sect_);
1748    return codecs_[last_audio_codec_id]->GetEstimatedBandwidth();
1749  }
1750  return -1;
1751}
1752
// Set playout mode for: voice, fax, streaming or off.
int AudioCodingModuleImpl::SetPlayoutMode(AudioPlayoutMode mode) {
  receiver_.SetPlayoutMode(mode);
  return 0;  // TODO(turajs): return value is for backward compatibility.
}
1758
// Get playout mode voice, fax, streaming or off.
// Simple delegate to the receiver's current mode.
AudioPlayoutMode AudioCodingModuleImpl::PlayoutMode() const {
  return receiver_.PlayoutMode();
}
1763
1764// Get 10 milliseconds of raw audio data to play out.
1765// Automatic resample to the requested frequency.
1766int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
1767                                           AudioFrame* audio_frame) {
1768  // GetAudio always returns 10 ms, at the requested sample rate.
1769  if (receiver_.GetAudio(desired_freq_hz, audio_frame) != 0) {
1770    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1771                 "PlayoutData failed, RecOut Failed");
1772    return -1;
1773  }
1774
1775  audio_frame->id_ = id_;
1776  return 0;
1777}
1778
1779/////////////////////////////////////////
1780//   Statistics
1781//
1782
// TODO(turajs) change the return value to void. Also change the corresponding
// NetEq function.
// Fills |statistics| from the receiver; always reports success.
int AudioCodingModuleImpl::NetworkStatistics(ACMNetworkStatistics* statistics) {
  receiver_.NetworkStatistics(statistics);
  return 0;
}
1789
// Registers (or replaces) the callback invoked with VAD/DTX decisions.
// Guarded by |callback_crit_sect_| since the callback is read elsewhere.
int AudioCodingModuleImpl::RegisterVADCallback(ACMVADCallback* vad_callback) {
  WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_,
               "RegisterVADCallback()");
  CriticalSectionScoped lock(callback_crit_sect_);
  vad_callback_ = vad_callback;
  return 0;
}
1797
1798// TODO(tlegrand): Modify this function to work for stereo, and add tests.
1799int AudioCodingModuleImpl::IncomingPayload(const uint8_t* incoming_payload,
1800                                           int payload_length,
1801                                           uint8_t payload_type,
1802                                           uint32_t timestamp) {
1803  if (payload_length < 0) {
1804    // Log error in trace file.
1805    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1806                 "IncomingPacket() Error, payload-length cannot be negative");
1807    return -1;
1808  }
1809
1810  // We are not acquiring any lock when interacting with |aux_rtp_header_| no
1811  // other method uses this member variable.
1812  if (aux_rtp_header_ == NULL) {
1813    // This is the first time that we are using |dummy_rtp_header_|
1814    // so we have to create it.
1815    aux_rtp_header_ = new WebRtcRTPHeader;
1816    aux_rtp_header_->header.payloadType = payload_type;
1817    // Don't matter in this case.
1818    aux_rtp_header_->header.ssrc = 0;
1819    aux_rtp_header_->header.markerBit = false;
1820    // Start with random numbers.
1821    aux_rtp_header_->header.sequenceNumber = 0x1234;  // Arbitrary.
1822    aux_rtp_header_->type.Audio.channel = 1;
1823  }
1824
1825  aux_rtp_header_->header.timestamp = timestamp;
1826  IncomingPacket(incoming_payload, payload_length, *aux_rtp_header_);
1827  // Get ready for the next payload.
1828  aux_rtp_header_->header.sequenceNumber++;
1829  return 0;
1830}
1831
1832int AudioCodingModuleImpl::ReplaceInternalDTXWithWebRtc(bool use_webrtc_dtx) {
1833  CriticalSectionScoped lock(acm_crit_sect_);
1834
1835  if (!HaveValidEncoder("ReplaceInternalDTXWithWebRtc")) {
1836    WEBRTC_TRACE(
1837        webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1838        "Cannot replace codec internal DTX when no send codec is registered.");
1839    return -1;
1840  }
1841
1842  int res = codecs_[current_send_codec_idx_]->ReplaceInternalDTX(
1843      use_webrtc_dtx);
1844  // Check if VAD is turned on, or if there is any error.
1845  if (res == 1) {
1846    vad_enabled_ = true;
1847  } else if (res < 0) {
1848    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1849                 "Failed to set ReplaceInternalDTXWithWebRtc(%d)",
1850                 use_webrtc_dtx);
1851    return res;
1852  }
1853
1854  return 0;
1855}
1856
1857int AudioCodingModuleImpl::IsInternalDTXReplacedWithWebRtc(
1858    bool* uses_webrtc_dtx) {
1859  CriticalSectionScoped lock(acm_crit_sect_);
1860
1861  if (!HaveValidEncoder("IsInternalDTXReplacedWithWebRtc")) {
1862    return -1;
1863  }
1864  if (codecs_[current_send_codec_idx_]->IsInternalDTXReplaced(uses_webrtc_dtx)
1865      < 0) {
1866    return -1;
1867  }
1868  return 0;
1869}
1870
// Caps the iSAC send rate in bits per second; delegates to the current send
// codec. Fails if no valid send codec is registered.
int AudioCodingModuleImpl::SetISACMaxRate(int max_bit_per_sec) {
  CriticalSectionScoped lock(acm_crit_sect_);

  if (!HaveValidEncoder("SetISACMaxRate")) {
    return -1;
  }

  return codecs_[current_send_codec_idx_]->SetISACMaxRate(max_bit_per_sec);
}
1880
// Caps the size in bytes of an iSAC payload; delegates to the current send
// codec. Fails if no valid send codec is registered.
int AudioCodingModuleImpl::SetISACMaxPayloadSize(int max_size_bytes) {
  CriticalSectionScoped lock(acm_crit_sect_);

  if (!HaveValidEncoder("SetISACMaxPayloadSize")) {
    return -1;
  }

  return codecs_[current_send_codec_idx_]->SetISACMaxPayloadSize(
      max_size_bytes);
}
1891
// Configures the iSAC bandwidth estimator's initial frame size and rate;
// delegates to the current send codec. Fails if no valid send codec is
// registered.
int AudioCodingModuleImpl::ConfigISACBandwidthEstimator(
    int frame_size_ms,
    int rate_bit_per_sec,
    bool enforce_frame_size) {
  CriticalSectionScoped lock(acm_crit_sect_);

  if (!HaveValidEncoder("ConfigISACBandwidthEstimator")) {
    return -1;
  }

  return codecs_[current_send_codec_idx_]->ConfigISACBandwidthEstimator(
      frame_size_ms, rate_bit_per_sec, enforce_frame_size);
}
1905
// Informs Opus encoder of the maximum playback rate the receiver will render.
// Delegates to the current send codec; fails if none is registered.
int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
  CriticalSectionScoped lock(acm_crit_sect_);
  if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) {
    return -1;
  }
  return codecs_[current_send_codec_idx_]->SetOpusMaxPlaybackRate(frequency_hz);
}
1914
// Writes the current playout RTP timestamp to |timestamp|.
// Returns 0 on success, -1 when the receiver has no timestamp available.
int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
  return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1;
}
1918
1919bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
1920  if ((!send_codec_registered_) || (current_send_codec_idx_ < 0) ||
1921      (current_send_codec_idx_ >= ACMCodecDB::kNumCodecs)) {
1922    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1923                 "%s failed: No send codec is registered.", caller_name);
1924    return false;
1925  }
1926  if ((current_send_codec_idx_ < 0) ||
1927      (current_send_codec_idx_ >= ACMCodecDB::kNumCodecs)) {
1928    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1929                 "%s failed: Send codec index out of range.", caller_name);
1930    return false;
1931  }
1932  if (codecs_[current_send_codec_idx_] == NULL) {
1933    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1934                 "%s failed: Send codec is NULL pointer.", caller_name);
1935    return false;
1936  }
1937  return true;
1938}
1939
// Removes the decoder registered for |payload_type| from the receiver.
// Returns the receiver's status code.
int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) {
  return receiver_.RemoveCodec(payload_type);
}
1943
1944// TODO(turajs): correct the type of |length_bytes| when it is corrected in
1945// GenericCodec.
1946int AudioCodingModuleImpl::REDPayloadISAC(int isac_rate,
1947                                          int isac_bw_estimate,
1948                                          uint8_t* payload,
1949                                          int16_t* length_bytes) {
1950  CriticalSectionScoped lock(acm_crit_sect_);
1951  if (!HaveValidEncoder("EncodeData")) {
1952    return -1;
1953  }
1954  int status;
1955  status = codecs_[current_send_codec_idx_]->REDPayloadISAC(isac_rate,
1956                                                            isac_bw_estimate,
1957                                                            payload,
1958                                                            length_bytes);
1959  return status;
1960}
1961
1962void AudioCodingModuleImpl::ResetFragmentation(int vector_size) {
1963  for (int n = 0; n < kMaxNumFragmentationVectors; n++) {
1964    fragmentation_.fragmentationOffset[n] = n * MAX_PAYLOAD_SIZE_BYTE;
1965  }
1966  memset(fragmentation_.fragmentationLength, 0, kMaxNumFragmentationVectors *
1967         sizeof(fragmentation_.fragmentationLength[0]));
1968  memset(fragmentation_.fragmentationTimeDiff, 0, kMaxNumFragmentationVectors *
1969         sizeof(fragmentation_.fragmentationTimeDiff[0]));
1970  memset(fragmentation_.fragmentationPlType,
1971         0,
1972         kMaxNumFragmentationVectors *
1973             sizeof(fragmentation_.fragmentationPlType[0]));
1974  fragmentation_.fragmentationVectorSize = static_cast<uint16_t>(vector_size);
1975}
1976
// Obtains the AudioDecoder for |codec|. For codecs that own their decoder,
// ensures the codec wrapper exists at |mirror_id| (creating it on demand),
// aliases |codec_id| to it when the two differ, and returns its decoder in
// |*decoder|. For codecs that do not own a decoder, |*decoder| is set to
// NULL. Returns 0 on success, -1 on failure.
// NOTE(review): the entries at |codec_id| and |mirror_id| deliberately share
// one codec instance; do not delete both.
int AudioCodingModuleImpl::GetAudioDecoder(const CodecInst& codec, int codec_id,
                                           int mirror_id,
                                           AudioDecoder** decoder) {
  if (ACMCodecDB::OwnsDecoder(codec_id)) {
    // This codec has to own its own decoder. Therefore, it should create the
    // corresponding AudioDecoder class and insert it into NetEq. If the codec
    // does not exist create it.
    //
    // TODO(turajs): this part of the code is common with RegisterSendCodec(),
    //               make a method for it.
    if (codecs_[mirror_id] == NULL) {
      codecs_[mirror_id] = CreateCodec(codec);
      if (codecs_[mirror_id] == NULL) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Cannot Create the codec");
        return -1;
      }
      mirror_codec_idx_[mirror_id] = mirror_id;
    }

    // Alias the codec-specific entry to the shared instance owned by
    // |mirror_id|.
    if (mirror_id != codec_id) {
      codecs_[codec_id] = codecs_[mirror_id];
      mirror_codec_idx_[codec_id] = mirror_id;
    }
    *decoder = codecs_[codec_id]->Decoder(codec_id);
    if (!*decoder) {
      // An owning codec must be able to produce a decoder; this is a
      // programming error.
      assert(false);
      return -1;
    }
  } else {
    *decoder = NULL;
  }

  return 0;
}
2012
// Sets the initial playout delay of the receiver to |delay_ms|.
// Returns the receiver's status code.
int AudioCodingModuleImpl::SetInitialPlayoutDelay(int delay_ms) {
  {
    // Scoped so the lock is released before calling into the receiver.
    CriticalSectionScoped lock(acm_crit_sect_);
    // Initialize receiver, if it is not initialized. Otherwise, initial delay
    // is reset upon initialization of the receiver.
    if (!receiver_initialized_)
      InitializeReceiverSafe();
  }
  return receiver_.SetInitialDelay(delay_ms);
}
2023
// Enables NACK with at most |max_nack_list_size| entries; delegates to the
// receiver and returns its status code.
int AudioCodingModuleImpl::EnableNack(size_t max_nack_list_size) {
  return receiver_.EnableNack(max_nack_list_size);
}
2027
// Disables NACK in the receiver.
void AudioCodingModuleImpl::DisableNack() {
  receiver_.DisableNack();
}
2031
// Returns the receiver's current NACK list given the round-trip time.
std::vector<uint16_t> AudioCodingModuleImpl::GetNackList(
    int round_trip_time_ms) const {
  return receiver_.GetNackList(round_trip_time_ms);
}
2036
// Returns the least required delay, in ms, as reported by the receiver.
int AudioCodingModuleImpl::LeastRequiredDelayMs() const {
  return receiver_.LeastRequiredDelayMs();
}
2040
// Fills |call_stats| with the receiver's decoding-call statistics.
void AudioCodingModuleImpl::GetDecodingCallStatistics(
      AudioDecodingCallStats* call_stats) const {
  receiver_.GetDecodingCallStatistics(call_stats);
}
2045
2046}  // namespace acm2
2047
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
bool AudioCodingImpl::RegisterSendCodec(AudioEncoder* send_codec) {
  FATAL() << "Not implemented yet.";
}
2051
2052bool AudioCodingImpl::RegisterSendCodec(int encoder_type,
2053                                        uint8_t payload_type,
2054                                        int frame_size_samples) {
2055  std::string codec_name;
2056  int sample_rate_hz;
2057  int channels;
2058  if (!MapCodecTypeToParameters(
2059          encoder_type, &codec_name, &sample_rate_hz, &channels)) {
2060    return false;
2061  }
2062  webrtc::CodecInst codec;
2063  AudioCodingModule::Codec(
2064      codec_name.c_str(), &codec, sample_rate_hz, channels);
2065  codec.pltype = payload_type;
2066  if (frame_size_samples > 0) {
2067    codec.pacsize = frame_size_samples;
2068  }
2069  return acm_old_->RegisterSendCodec(codec) == 0;
2070}
2071
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
const AudioEncoder* AudioCodingImpl::GetSenderInfo() const {
  FATAL() << "Not implemented yet.";
}
2075
2076const CodecInst* AudioCodingImpl::GetSenderCodecInst() {
2077  if (acm_old_->SendCodec(&current_send_codec_) != 0) {
2078    return NULL;
2079  }
2080  return &current_send_codec_;
2081}
2082
2083int AudioCodingImpl::Add10MsAudio(const AudioFrame& audio_frame) {
2084  if (acm_old_->Add10MsData(audio_frame) != 0) {
2085    return -1;
2086  }
2087  return acm_old_->Process();
2088}
2089
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
const ReceiverInfo* AudioCodingImpl::GetReceiverInfo() const {
  FATAL() << "Not implemented yet.";
}
2093
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
bool AudioCodingImpl::RegisterReceiveCodec(AudioDecoder* receive_codec) {
  FATAL() << "Not implemented yet.";
}
2097
2098bool AudioCodingImpl::RegisterReceiveCodec(int decoder_type,
2099                                           uint8_t payload_type) {
2100  std::string codec_name;
2101  int sample_rate_hz;
2102  int channels;
2103  if (!MapCodecTypeToParameters(
2104          decoder_type, &codec_name, &sample_rate_hz, &channels)) {
2105    return false;
2106  }
2107  webrtc::CodecInst codec;
2108  AudioCodingModule::Codec(
2109      codec_name.c_str(), &codec, sample_rate_hz, channels);
2110  codec.pltype = payload_type;
2111  return acm_old_->RegisterReceiveCodec(codec) == 0;
2112}
2113
// Forwards a parsed RTP packet to the legacy module; true on success.
bool AudioCodingImpl::InsertPacket(const uint8_t* incoming_payload,
                                   int32_t payload_len_bytes,
                                   const WebRtcRTPHeader& rtp_info) {
  return acm_old_->IncomingPacket(
             incoming_payload, payload_len_bytes, rtp_info) == 0;
}
2120
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
bool AudioCodingImpl::InsertPayload(const uint8_t* incoming_payload,
                                    int32_t payload_len_byte,
                                    uint8_t payload_type,
                                    uint32_t timestamp) {
  FATAL() << "Not implemented yet.";
}
2127
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
bool AudioCodingImpl::SetMinimumPlayoutDelay(int time_ms) {
  FATAL() << "Not implemented yet.";
}
2131
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
bool AudioCodingImpl::SetMaximumPlayoutDelay(int time_ms) {
  FATAL() << "Not implemented yet.";
}
2135
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
int AudioCodingImpl::LeastRequiredDelayMs() const {
  FATAL() << "Not implemented yet.";
}
2139
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
bool AudioCodingImpl::PlayoutTimestamp(uint32_t* timestamp) {
  FATAL() << "Not implemented yet.";
}
2143
// Pulls 10 ms of playout audio at |playout_frequency_hz_|; true on success.
bool AudioCodingImpl::Get10MsAudio(AudioFrame* audio_frame) {
  return acm_old_->PlayoutData10Ms(playout_frequency_hz_, audio_frame) == 0;
}
2147
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
bool AudioCodingImpl::NetworkStatistics(
    ACMNetworkStatistics* network_statistics) {
  FATAL() << "Not implemented yet.";
}
2152
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
bool AudioCodingImpl::EnableNack(size_t max_nack_list_size) {
  FATAL() << "Not implemented yet.";
}
2156
// Not yet ported to the new AudioCoding API; crashes deliberately if called.
void AudioCodingImpl::DisableNack() {
  FATAL() << "Not implemented yet.";
}
2160
// Configures DTX/VAD on the legacy module; true on success.
bool AudioCodingImpl::SetVad(bool enable_dtx,
                             bool enable_vad,
                             ACMVADMode vad_mode) {
  return acm_old_->SetVAD(enable_dtx, enable_vad, vad_mode) == 0;
}
2166
// Returns the legacy module's current NACK list for the given RTT.
std::vector<uint16_t> AudioCodingImpl::GetNackList(
    int round_trip_time_ms) const {
  return acm_old_->GetNackList(round_trip_time_ms);
}
2171
// Fills |call_stats| with the legacy module's decoding-call statistics.
void AudioCodingImpl::GetDecodingCallStatistics(
    AudioDecodingCallStats* call_stats) const {
  acm_old_->GetDecodingCallStatistics(call_stats);
}
2176
// Translates an ACMCodecDB codec-type enum into the (name, sample rate,
// channel count) triple needed to build a CodecInst via
// AudioCodingModule::Codec(). The set of reachable cases depends on the
// WEBRTC_CODEC_* build flags. Returns true for every known type; an unknown
// |codec_type| hits FATAL() in the default branch and does not return.
bool AudioCodingImpl::MapCodecTypeToParameters(int codec_type,
                                               std::string* codec_name,
                                               int* sample_rate_hz,
                                               int* channels) {
  switch (codec_type) {
#ifdef WEBRTC_CODEC_PCM16
    case acm2::ACMCodecDB::kPCM16B:
      *codec_name = "L16";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kPCM16Bwb:
      *codec_name = "L16";
      *sample_rate_hz = 16000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kPCM16Bswb32kHz:
      *codec_name = "L16";
      *sample_rate_hz = 32000;
      *channels = 1;
      break;
    // Stereo (_2ch) variants of linear PCM.
    case acm2::ACMCodecDB::kPCM16B_2ch:
      *codec_name = "L16";
      *sample_rate_hz = 8000;
      *channels = 2;
      break;
    case acm2::ACMCodecDB::kPCM16Bwb_2ch:
      *codec_name = "L16";
      *sample_rate_hz = 16000;
      *channels = 2;
      break;
    case acm2::ACMCodecDB::kPCM16Bswb32kHz_2ch:
      *codec_name = "L16";
      *sample_rate_hz = 32000;
      *channels = 2;
      break;
#endif
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
    case acm2::ACMCodecDB::kISAC:
      *codec_name = "ISAC";
      *sample_rate_hz = 16000;
      *channels = 1;
      break;
#endif
#ifdef WEBRTC_CODEC_ISAC
    // Super-wideband and fullband iSAC only exist in the floating-point
    // build (not ISACFX).
    case acm2::ACMCodecDB::kISACSWB:
      *codec_name = "ISAC";
      *sample_rate_hz = 32000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kISACFB:
      *codec_name = "ISAC";
      *sample_rate_hz = 48000;
      *channels = 1;
      break;
#endif
#ifdef WEBRTC_CODEC_ILBC
    case acm2::ACMCodecDB::kILBC:
      *codec_name = "ILBC";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
#endif
    case acm2::ACMCodecDB::kPCMA:
      *codec_name = "PCMA";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kPCMA_2ch:
      *codec_name = "PCMA";
      *sample_rate_hz = 8000;
      *channels = 2;
      break;
    case acm2::ACMCodecDB::kPCMU:
      *codec_name = "PCMU";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kPCMU_2ch:
      *codec_name = "PCMU";
      *sample_rate_hz = 8000;
      *channels = 2;
      break;
#ifdef WEBRTC_CODEC_G722
    case acm2::ACMCodecDB::kG722:
      *codec_name = "G722";
      *sample_rate_hz = 16000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kG722_2ch:
      *codec_name = "G722";
      *sample_rate_hz = 16000;
      *channels = 2;
      break;
#endif
#ifdef WEBRTC_CODEC_OPUS
    case acm2::ACMCodecDB::kOpus:
      *codec_name = "opus";
      *sample_rate_hz = 48000;
      *channels = 2;
      break;
#endif
    // Comfort noise at narrowband, wideband and super-wideband rates.
    case acm2::ACMCodecDB::kCNNB:
      *codec_name = "CN";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kCNWB:
      *codec_name = "CN";
      *sample_rate_hz = 16000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kCNSWB:
      *codec_name = "CN";
      *sample_rate_hz = 32000;
      *channels = 1;
      break;
    case acm2::ACMCodecDB::kRED:
      *codec_name = "red";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
#ifdef WEBRTC_CODEC_AVT
    case acm2::ACMCodecDB::kAVT:
      *codec_name = "telephone-event";
      *sample_rate_hz = 8000;
      *channels = 1;
      break;
#endif
    default:
      FATAL() << "Codec type " << codec_type << " not supported.";
  }
  return true;
}
2311
2312}  // namespace webrtc
2313