Searched refs:audioFrame (Results 1 - 19 of 19) sorted by relevance

/external/chromium_org/third_party/webrtc/modules/audio_conference_mixer/source/
audio_frame_manipulator.h 17 // Updates the audioFrame's energy (based on its samples).
18 void CalculateEnergy(AudioFrame& audioFrame);
20 // Apply linear step function that ramps in/out the audio samples in audioFrame
21 void RampIn(AudioFrame& audioFrame);
22 void RampOut(AudioFrame& audioFrame);
audio_frame_manipulator.cc 42 void CalculateEnergy(AudioFrame& audioFrame) argument
44 audioFrame.energy_ = 0;
45 for(int position = 0; position < audioFrame.samples_per_channel_;
49 audioFrame.energy_ += audioFrame.data_[position] *
50 audioFrame.data_[position];
54 void RampIn(AudioFrame& audioFrame) argument
56 assert(rampSize <= audioFrame.samples_per_channel_);
59 audioFrame.data_[i] = static_cast<int16_t>(rampArray[i] *
60 audioFrame
64 RampOut(AudioFrame& audioFrame) argument
[all...]
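The manipulator hits above show the per-sample energy and ramp loops only in fragments. As a reading aid, here is a minimal self-contained sketch of the same two operations, using a stripped-down stand-in for webrtc::AudioFrame (only the data_, samples_per_channel_ and energy_ fields from the snippets appear; the module itself uses a precomputed rampArray, so the linear gain below is an illustrative stand-in, not its actual ramp table):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for webrtc::AudioFrame; just the fields the hits use.
    struct MiniAudioFrame {
      std::vector<int16_t> data_;     // interleaved PCM samples
      int samples_per_channel_ = 0;
      uint32_t energy_ = 0;
    };

    // Sum of squared samples, mirroring CalculateEnergy() above.
    void CalculateEnergy(MiniAudioFrame& audioFrame) {
      audioFrame.energy_ = 0;
      for (int i = 0; i < audioFrame.samples_per_channel_; ++i) {
        audioFrame.energy_ += static_cast<uint32_t>(
            audioFrame.data_[i] * audioFrame.data_[i]);
      }
    }

    // Linear fade-in over the first rampSize samples, in the spirit of RampIn().
    void RampIn(MiniAudioFrame& audioFrame, int rampSize) {
      assert(rampSize <= audioFrame.samples_per_channel_);
      for (int i = 0; i < rampSize; ++i) {
        const float gain = static_cast<float>(i + 1) / static_cast<float>(rampSize);
        audioFrame.data_[i] = static_cast<int16_t>(gain * audioFrame.data_[i]);
      }
    }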
audio_conference_mixer_impl.cc 24 AudioFrame* audioFrame; member in struct:webrtc::__anon15852::ParticipantFramePair
648 AudioFrame* audioFrame = NULL; local
649 if(_audioFramePool->PopMemory(audioFrame) == -1) {
655 audioFrame->sample_rate_hz_ = _outputFrequency;
657 if((*participant)->GetAudioFrame(_id,*audioFrame) != 0) {
660 _audioFramePool->PushMemory(audioFrame);
665 audioFrame->ntp_time_ms_ = -1;
671 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
672 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
677 if(audioFrame
825 AudioFrame* audioFrame = NULL; local
[all...]
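The mixer hits show a recurring ownership pattern: a frame is popped from a pre-allocated pool, handed to a participant through GetAudioFrame(), and pushed straight back if the participant fails. A hedged sketch of such a pool (FramePool and Frame here are illustrative stand-ins, not webrtc's MemoryPool/AudioFrame):

    #include <cstddef>
    #include <vector>

    struct Frame { int sample_rate_hz_ = 0; };   // placeholder for AudioFrame

    // Fixed-size pool with the PopMemory()/PushMemory() shape seen above.
    class FramePool {
     public:
      explicit FramePool(std::size_t n) : storage_(n) {
        for (Frame& f : storage_) free_.push_back(&f);
      }
      // Returns -1 when no frame is available, which is what the mixer checks for.
      int PopMemory(Frame*& out) {
        if (free_.empty()) { out = nullptr; return -1; }
        out = free_.back();
        free_.pop_back();
        return 0;
      }
      void PushMemory(Frame*& frame) {
        free_.push_back(frame);
        frame = nullptr;
      }
     private:
      std::vector<Frame> storage_;   // owns the frames
      std::vector<Frame*> free_;     // frames currently available
    };

As in the snippet, the caller sets sample_rate_hz_ on the popped frame, calls the participant's GetAudioFrame(), and on any non-zero return immediately pushes the frame back so the pool never leaks.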
/external/chromium_org/third_party/webrtc/modules/audio_coding/main/test/
SpatialAudio.cc 153 AudioFrame audioFrame; local
161 _inFile.Read10MsData(audioFrame);
162 for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
163 audioFrame.data_[n] = (int16_t) floor(
164 audioFrame.data_[n] * leftPanning + 0.5);
166 CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
168 for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
169 audioFrame.data_[n] = (int16_t) floor(
170 audioFrame.data_[n] * rightToLeftRatio + 0.5);
172 CHECK_ERROR(_acmRight->Add10MsData(audioFrame));
184 AudioFrame audioFrame; local
[all...]
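The SpatialAudio.cc hits pan one mono frame into two encoders by scaling each sample: first by leftPanning before _acmLeft, then by rightToLeftRatio before _acmRight. A small self-contained sketch of that scaling step (the gain values are whatever the test chooses; the rounding matches the floor(x + 0.5) idiom in the snippet):

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Scale 10 ms of samples in place by a pan gain, rounding to nearest.
    void ApplyPanGain(std::vector<int16_t>& samples, double gain) {
      for (int16_t& s : samples) {
        s = static_cast<int16_t>(std::floor(s * gain + 0.5));
      }
    }

Applying the same helper a second time with rightToLeftRatio to the already left-panned data reproduces the second loop above.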
TwoWayCommunication.cc 273 AudioFrame audioFrame; local
288 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
289 EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
290 EXPECT_EQ(0, _acmRefA->Add10MsData(audioFrame));
292 EXPECT_GT(_inFileB.Read10MsData(audioFrame), 0);
296 EXPECT_EQ(0, _acmB->Add10MsData(audioFrame));
298 EXPECT_EQ(-1, _acmB->Add10MsData(audioFrame));
307 EXPECT_EQ(0, _acmRefB->Add10MsData(audioFrame));
311 EXPECT_EQ(0, _acmA->PlayoutData10Ms(outFreqHzA, &audioFrame));
312 _outFileA.Write10MsData(audioFrame);
[all...]
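This file, and the test hits that follow (TestVADDTX.cc, TestRedFec.cc, iSACTest.cc, EncodeDecodeTest.cc, APITest.cc), all drive the AudioCodingModule with the same 10 ms pump. A condensed sketch of that loop, assuming an already-configured ACM plus the PCMFile helpers the tests use (acm, inFile, outFile, outFreqHz and the EndOfFile() check are placeholders for the fixtures' members; codec registration and error checking are omitted):

    // Sketch only: acm, inFile, outFile and outFreqHz are assumed to be set up
    // elsewhere, as the tests above do before entering their loops.
    webrtc::AudioFrame audioFrame;
    while (!inFile.EndOfFile()) {
      inFile.Read10MsData(audioFrame);                // pull 10 ms of PCM input
      acm->Add10MsData(audioFrame);                   // feed the send side
      acm->PlayoutData10Ms(outFreqHz, &audioFrame);   // fetch decoded audio
      outFile.Write10MsData(audioFrame);              // write it out
    }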
TestVADDTX.cc 235 AudioFrame audioFrame; local
242 _inFileA.Read10MsData(audioFrame);
243 audioFrame.timestamp_ = timestampA;
245 EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
247 EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
248 _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
TestRedFec.cc 295 AudioFrame audioFrame; local
302 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
303 EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
305 EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
306 _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
iSACTest.cc 247 AudioFrame audioFrame; local
248 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
249 EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
250 EXPECT_EQ(0, _acmB->Add10MsData(audioFrame));
253 EXPECT_EQ(0, _acmA->PlayoutData10Ms(32000, &audioFrame));
254 _outFileA.Write10MsData(audioFrame);
255 EXPECT_EQ(0, _acmB->PlayoutData10Ms(32000, &audioFrame));
256 _outFileB.Write10MsData(audioFrame);
EncodeDecodeTest.cc 211 AudioFrame audioFrame; local
213 int32_t ok =_acm->PlayoutData10Ms(_frequency, &audioFrame);
221 _pcmFile.Write10MsData(audioFrame.data_,
222 audioFrame.samples_per_channel_ * audioFrame.num_channels_);
APITest.cc 337 AudioFrame audioFrame; local
338 if (_acmA->PlayoutData10Ms(_outFreqHzA, &audioFrame) < 0) {
349 _outFileA.Write10MsData(audioFrame);
357 AudioFrame audioFrame; local
358 if (_acmB->PlayoutData10Ms(_outFreqHzB, &audioFrame) < 0) {
370 _outFileB.Write10MsData(audioFrame);
378 AudioFrame audioFrame; local
379 _inFileA.Read10MsData(audioFrame);
380 if (_acmA->Add10MsData(audioFrame) < 0) {
395 AudioFrame audioFrame; local
[all...]
/external/chromium_org/third_party/webrtc/voice_engine/
level_indicator.cc 48 void AudioLevel::ComputeLevel(const AudioFrame& audioFrame) argument
54 audioFrame.data_,
55 audioFrame.samples_per_channel_*audioFrame.num_channels_);
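ComputeLevel() above hands data_ and samples_per_channel_ * num_channels_ (i.e. every interleaved sample) to a callee that the snippet truncates. A hedged, self-contained loop illustrating the kind of computation a level indicator performs on those arguments (a plain max-absolute-value scan; the real code may use an optimized helper instead):

    #include <cstdint>

    // Largest sample magnitude over all interleaved samples in the frame.
    int MaxAbsSample(const int16_t* data, int length) {
      int maxAbs = 0;
      for (int i = 0; i < length; ++i) {
        int v = data[i];
        if (v < 0) v = -v;            // safe even for -32768, since v is an int
        if (v > maxAbs) maxAbs = v;
      }
      return maxAbs;
    }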
level_indicator.h 38 void ComputeLevel(const AudioFrame& audioFrame);
channel.cc 576 int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame) argument
582 if (audio_coding_->PlayoutData10Ms(audioFrame.sample_rate_hz_,
583 &audioFrame) == -1)
597 UpdateRxVadDetection(audioFrame);
601 audioFrame.id_ = VoEChannelId(audioFrame.id_);
603 _outputSpeechType = audioFrame.speech_type_;
608 int err = rx_audioproc_->ProcessStream(&audioFrame);
628 AudioFrameOperations::ScaleWithSat(output_gain, audioFrame);
636 if (audioFrame
2694 UpdateRxVadDetection(AudioFrame& audioFrame) argument
3660 Demultiplex(const AudioFrame& audioFrame) argument
4127 MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) argument
[all...]
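Channel::GetAudioFrame() above pulls 10 ms from the ACM into the caller's frame, optionally runs RX processing, and finally scales the output with saturation via AudioFrameOperations::ScaleWithSat(). A hedged sketch of that last step as a plain function over the frame's sample buffer (the actual helper lives in AudioFrameOperations; this only illustrates the idea):

    #include <cstdint>

    // Multiply every sample by a gain and clamp the result to the int16 range.
    void ScaleWithSaturation(float gain, int16_t* data, int length) {
      for (int i = 0; i < length; ++i) {
        float v = gain * static_cast<float>(data[i]);
        if (v > 32767.0f) v = 32767.0f;
        if (v < -32768.0f) v = -32768.0f;
        data[i] = static_cast<int16_t>(v);
      }
    }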
output_mixer.h 68 AudioFrame* audioFrame);
channel.h 303 int UpdateRxVadDetection(AudioFrame& audioFrame);
420 int32_t GetAudioFrame(int32_t id, AudioFrame& audioFrame);
471 uint32_t Demultiplex(const AudioFrame& audioFrame);
498 int32_t MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency);
/external/chromium_org/third_party/webrtc/modules/utility/source/
coder.cc 86 AudioFrame audioFrame; local
87 audioFrame.CopyFrom(audio);
88 audioFrame.timestamp_ = _encodeTimestamp;
89 _encodeTimestamp += audioFrame.samples_per_channel_;
94 if(_acm->Add10MsData((AudioFrame&)audioFrame) == -1)
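coder.cc copies the incoming audio into a local frame, stamps it with a running timestamp, and advances that timestamp by samples_per_channel_ before handing the frame to Add10MsData(). A minimal sketch of that bookkeeping (the member name _encodeTimestamp comes from the snippet; the wrapper class is illustrative):

    #include <cstdint>

    // Running timestamp that advances by one frame's worth of samples per call.
    class EncodeTimestamper {
     public:
      uint32_t Stamp(int samples_per_channel) {
        const uint32_t ts = _encodeTimestamp;
        _encodeTimestamp += static_cast<uint32_t>(samples_per_channel);
        return ts;   // assign this to audioFrame.timestamp_ before encoding
      }
     private:
      uint32_t _encodeTimestamp = 0;
    };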
/external/webrtc/src/modules/audio_processing/
audio_buffer.h 49 void DeinterleaveFrom(AudioFrame* audioFrame);
50 void InterleaveTo(AudioFrame* audioFrame) const;
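DeinterleaveFrom()/InterleaveTo() convert between an interleaved AudioFrame and the per-channel buffers the audio-processing module works on internally. A hedged, self-contained sketch of the deinterleave direction (plain vectors stand in for the module's internal channel buffers):

    #include <cstdint>
    #include <vector>

    // Split interleaved samples (L R L R ...) into one vector per channel.
    void Deinterleave(const int16_t* interleaved, int samples_per_channel,
                      int num_channels,
                      std::vector<std::vector<int16_t>>* channels) {
      channels->assign(num_channels, std::vector<int16_t>(samples_per_channel));
      for (int ch = 0; ch < num_channels; ++ch) {
        for (int i = 0; i < samples_per_channel; ++i) {
          (*channels)[ch][i] = interleaved[i * num_channels + ch];
        }
      }
    }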
/external/chromium_org/third_party/webrtc/modules/audio_conference_mixer/interface/
audio_conference_mixer_defines.h 24 // The implementation of this function should update audioFrame with new
28 virtual int32_t GetAudioFrame(const int32_t id, AudioFrame& audioFrame) = 0;
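GetAudioFrame() is the callback the mixer invokes on each participant; per the comment above it must fill audioFrame with fresh audio, and per the mixer code earlier in these results a non-zero return makes the mixer push the frame back to its pool and skip the participant. A hedged sketch of a trivial callback body, using only the public AudioFrame fields seen in these hits (the enclosing participant class, its base-class declaration and any other pure-virtual methods are omitted; the include for AudioFrame is assumed to be in scope):

    #include <cstdint>
    #include <cstring>

    // Deliver 10 ms of mono silence at the rate the mixer asked for; the mixer
    // sets sample_rate_hz_ before calling, per audio_conference_mixer_impl.cc above.
    int32_t GetSilentAudioFrame(const int32_t /*id*/, webrtc::AudioFrame& audioFrame) {
      audioFrame.samples_per_channel_ = audioFrame.sample_rate_hz_ / 100;
      audioFrame.num_channels_ = 1;
      std::memset(audioFrame.data_, 0,
                  sizeof(int16_t) * audioFrame.samples_per_channel_);
      return 0;   // 0 = frame is valid and should be mixed
    }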
/external/chromium_org/third_party/webrtc/modules/audio_processing/
audio_buffer.h 89 void DeinterleaveFrom(AudioFrame* audioFrame);

Completed in 270 milliseconds