Searched refs:audioFrame (Results 1 - 17 of 17) sorted by relevance

/external/webrtc/webrtc/modules/audio_conference_mixer/source/
audio_frame_manipulator.h
17 // Updates the audioFrame's energy (based on its samples).
18 void CalculateEnergy(AudioFrame& audioFrame);
20 // Apply linear step function that ramps in/out the audio samples in audioFrame
21 void RampIn(AudioFrame& audioFrame);
22 void RampOut(AudioFrame& audioFrame);
audio_frame_manipulator.cc
42 void CalculateEnergy(AudioFrame& audioFrame) argument
44 audioFrame.energy_ = 0;
45 for(size_t position = 0; position < audioFrame.samples_per_channel_;
49 audioFrame.energy_ += audioFrame.data_[position] *
50 audioFrame.data_[position];
54 void RampIn(AudioFrame& audioFrame) argument
56 assert(rampSize <= audioFrame.samples_per_channel_);
59 audioFrame.data_[i] = static_cast<int16_t>(rampArray[i] *
60 audioFrame
64 RampOut(AudioFrame& audioFrame) argument
[all...]
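
For orientation, the pattern these two files show (frame energy as a sum of squared samples, plus a linear gain ramp over the head or tail of a frame) can be sketched without the WebRTC types as below. Frame is an illustrative stand-in, not webrtc::AudioFrame, and the ramp length is passed in rather than read from the file-scope rampArray/rampSize used above.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for the frame these helpers manipulate.
    struct Frame {
      std::vector<int16_t> data;  // mono samples, for simplicity
      uint32_t energy = 0;
    };

    // Energy as the sum of squared samples, as in CalculateEnergy above.
    void CalculateEnergy(Frame& f) {
      f.energy = 0;
      for (int16_t s : f.data)
        f.energy += static_cast<uint32_t>(static_cast<int32_t>(s) * s);
    }

    // Linear fade-in over the first ramp_size samples; RampOut applies the
    // same idea in reverse over the tail of the frame.
    void RampIn(Frame& f, size_t ramp_size) {
      ramp_size = std::min(ramp_size, f.data.size());
      for (size_t i = 0; i < ramp_size; ++i) {
        const float gain = static_cast<float>(i + 1) / ramp_size;
        f.data[i] = static_cast<int16_t>(gain * f.data[i]);
      }
    }
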
audio_conference_mixer_impl.cc
24 AudioFrame* audioFrame; member in struct:webrtc::__anon21477::ParticipantFramePair
548 AudioFrame* audioFrame = NULL; local
549 if(_audioFramePool->PopMemory(audioFrame) == -1) {
555 audioFrame->sample_rate_hz_ = _outputFrequency;
557 if((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
560 _audioFramePool->PushMemory(audioFrame);
565 audioFrame->ntp_time_ms_ = -1;
571 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
572 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
577 if(audioFrame
725 AudioFrame* audioFrame = NULL; local
[all...]
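
The mixer snippet above shows a pop/fill/push lifecycle: take a pre-allocated frame from a pool, let the participant fill it via GetAudioFrame(), and return the frame to the pool immediately if filling fails. A minimal sketch of that pattern, with hypothetical FramePool and Participant types standing in for MemoryPool<AudioFrame> and MixerParticipant:

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <stack>
    #include <vector>

    struct Frame {
      int sample_rate_hz = 0;
      int64_t ntp_time_ms = 0;
      std::vector<int16_t> data;
    };

    // Hypothetical pool of pre-allocated frames (the real code uses
    // MemoryPool<AudioFrame> with PopMemory()/PushMemory()).
    class FramePool {
     public:
      void Preallocate(size_t n) {
        for (size_t i = 0; i < n; ++i) {
          storage_.push_back(std::make_unique<Frame>());
          free_.push(storage_.back().get());
        }
      }
      Frame* Pop() {  // mirrors PopMemory() returning -1 on failure
        if (free_.empty()) return nullptr;
        Frame* f = free_.top();
        free_.pop();
        return f;
      }
      void Push(Frame* f) { free_.push(f); }
     private:
      std::vector<std::unique_ptr<Frame>> storage_;
      std::stack<Frame*> free_;
    };

    struct Participant {
      // Fills |frame| with 10 ms of audio; returns 0 on success.
      virtual int GetAudioFrame(int id, Frame* frame) = 0;
      virtual ~Participant() = default;
    };

    // Take a frame from the pool, ask the participant to fill it at the
    // mixer's output rate, and recycle the frame immediately on failure.
    Frame* AcquireFrame(FramePool& pool, Participant& participant, int id,
                        int output_hz) {
      Frame* frame = pool.Pop();
      if (frame == nullptr) return nullptr;
      frame->sample_rate_hz = output_hz;
      if (participant.GetAudioFrame(id, frame) != 0) {
        pool.Push(frame);
        return nullptr;
      }
      frame->ntp_time_ms = -1;  // the mixer does not provide an NTP time
      return frame;
    }
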
/external/webrtc/webrtc/modules/audio_coding/test/
SpatialAudio.cc
153 AudioFrame audioFrame; local
161 _inFile.Read10MsData(audioFrame);
162 for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
163 audioFrame.data_[n] = (int16_t) floor(
164 audioFrame.data_[n] * leftPanning + 0.5);
166 CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
168 for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
169 audioFrame.data_[n] = (int16_t) floor(
170 audioFrame.data_[n] * rightToLeftRatio + 0.5);
172 CHECK_ERROR(_acmRight->Add10MsData(audioFrame));
181 AudioFrame audioFrame; local
[all...]
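
The SpatialAudio loop scales every 16-bit sample by a panning gain and rounds to nearest before truncating back to int16_t; the left-panned frame goes to one ACM instance, and the same frame is rescaled by rightToLeftRatio before going to the other. The per-sample arithmetic, with an illustrative function name:

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Scale each sample by a gain in [0, 1], rounding to nearest.
    void ApplyGain(std::vector<int16_t>& samples, double gain) {
      for (int16_t& s : samples)
        s = static_cast<int16_t>(std::floor(s * gain + 0.5));
    }
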
TwoWayCommunication.cc
251 AudioFrame audioFrame; local
262 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
263 EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
264 EXPECT_GE(_acmRefA->Add10MsData(audioFrame), 0);
266 EXPECT_GT(_inFileB.Read10MsData(audioFrame), 0);
268 EXPECT_GE(_acmB->Add10MsData(audioFrame), 0);
269 EXPECT_GE(_acmRefB->Add10MsData(audioFrame), 0);
270 EXPECT_EQ(0, _acmA->PlayoutData10Ms(outFreqHzA, &audioFrame));
271 _outFileA.Write10MsData(audioFrame);
272 EXPECT_EQ(0, _acmRefA->PlayoutData10Ms(outFreqHzA, &audioFrame));
[all...]
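
All of the ACM tests in this directory drive the same 10 ms push/pull loop: read a block from a PCM file, hand it to the sending ACM with Add10MsData(), pull decoded audio from the receiving ACM with PlayoutData10Ms(), and write it out; the same shape recurs in iSACTest.cc, EncodeDecodeTest.cc, TestRedFec.cc and APITest.cc below. A sketch of one iteration, with hypothetical PcmFile/Acm interfaces standing in for the real PCMFile and AudioCodingModule:

    #include <cstdint>
    #include <vector>

    struct Frame {
      std::vector<int16_t> data;
      int sample_rate_hz = 0;
    };

    struct PcmFile {
      virtual int32_t Read10Ms(Frame& f) = 0;      // > 0 while data remains
      virtual void Write10Ms(const Frame& f) = 0;
      virtual ~PcmFile() = default;
    };

    struct Acm {
      virtual int32_t Add10MsData(const Frame& f) = 0;        // capture side
      virtual int32_t PlayoutData10Ms(int hz, Frame* f) = 0;  // render side
      virtual ~Acm() = default;
    };

    // One direction of the loop: feed 10 ms to the sender, pull 10 ms of
    // decoded audio from the receiver at out_hz, and write it to file.
    bool RunOneStep(PcmFile& in, Acm& sender, Acm& receiver, int out_hz,
                    PcmFile& out) {
      Frame frame;
      if (in.Read10Ms(frame) <= 0) return false;   // end of input
      if (sender.Add10MsData(frame) < 0) return false;
      if (receiver.PlayoutData10Ms(out_hz, &frame) != 0) return false;
      out.Write10Ms(frame);
      return true;
    }
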
iSACTest.cc
198 AudioFrame audioFrame; local
199 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
200 EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
201 EXPECT_GE(_acmB->Add10MsData(audioFrame), 0);
202 EXPECT_EQ(0, _acmA->PlayoutData10Ms(32000, &audioFrame));
203 _outFileA.Write10MsData(audioFrame);
204 EXPECT_EQ(0, _acmB->PlayoutData10Ms(32000, &audioFrame));
205 _outFileB.Write10MsData(audioFrame);
EncodeDecodeTest.cc
210 AudioFrame audioFrame; local
212 int32_t ok =_acm->PlayoutData10Ms(_frequency, &audioFrame);
220 _pcmFile.Write10MsData(audioFrame.data_,
221 audioFrame.samples_per_channel_ * audioFrame.num_channels_);
TestRedFec.cc
454 AudioFrame audioFrame; local
462 EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
463 EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
464 EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
465 _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
APITest.cc
325 AudioFrame audioFrame; local
326 if (_acmA->PlayoutData10Ms(_outFreqHzA, &audioFrame) < 0) {
337 _outFileA.Write10MsData(audioFrame);
345 AudioFrame audioFrame; local
346 if (_acmB->PlayoutData10Ms(_outFreqHzB, &audioFrame) < 0) {
358 _outFileB.Write10MsData(audioFrame);
366 AudioFrame audioFrame; local
367 _inFileA.Read10MsData(audioFrame);
368 if (_acmA->Add10MsData(audioFrame) < 0) {
383 AudioFrame audioFrame; local
[all...]
/external/webrtc/webrtc/voice_engine/
level_indicator.cc
48 void AudioLevel::ComputeLevel(const AudioFrame& audioFrame) argument
54 audioFrame.data_,
55 audioFrame.samples_per_channel_*audioFrame.num_channels_);
level_indicator.h
38 void ComputeLevel(const AudioFrame& audioFrame);
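
ComputeLevel feeds the maximum absolute sample value of the frame (over samples_per_channel_ * num_channels_ interleaved samples) into its level tracking; the real code delegates this measurement to WebRtcSpl_MaxAbsValueW16. A plain C++ version of that measurement:

    #include <cstdint>
    #include <vector>

    int16_t MaxAbsValue(const std::vector<int16_t>& samples) {
      int32_t max_abs = 0;
      for (int16_t s : samples) {
        // Widen before negating: -INT16_MIN does not fit in int16_t.
        const int32_t a = s < 0 ? -static_cast<int32_t>(s) : s;
        if (a > max_abs) max_abs = a;
      }
      return static_cast<int16_t>(max_abs > 32767 ? 32767 : max_abs);
    }
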
channel.cc
523 int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame) argument
531 if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_,
532 audioFrame) == -1)
546 UpdateRxVadDetection(*audioFrame);
550 audioFrame->id_ = VoEChannelId(audioFrame->id_);
552 _outputSpeechType = audioFrame->speech_type_;
557 int err = rx_audioproc_->ProcessStream(audioFrame);
572 &audioFrame->data_[0],
573 audioFrame
2577 UpdateRxVadDetection(AudioFrame& audioFrame) argument
3346 Demultiplex(const AudioFrame& audioFrame) argument
3736 MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) argument
[all...]
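
Channel::GetAudioFrame above shows the receive path for one 10 ms block: decode via PlayoutData10Ms() at the rate already stored on the frame, run receive-side VAD, rewrite the frame id to the channel id, and run receive-side audio processing before the frame is handed on. A rough sketch of that ordering, with hypothetical stand-ins for the ACM, VAD and APM collaborators:

    #include <cstdint>

    struct Frame { int sample_rate_hz = 0; int id = 0; };

    struct Decoder { virtual int PlayoutData10Ms(int hz, Frame* f) = 0; virtual ~Decoder() = default; };
    struct RxVad   { virtual void Update(const Frame& f) = 0;           virtual ~RxVad() = default; };
    struct RxApm   { virtual int ProcessStream(Frame* f) = 0;           virtual ~RxApm() = default; };

    // Per-block receive path, in the order visible in the snippet above.
    int GetAudioFrame(Decoder& acm, RxVad& vad, RxApm& apm, int channel_id,
                      Frame* frame) {
      if (acm.PlayoutData10Ms(frame->sample_rate_hz, frame) == -1)
        return -1;
      vad.Update(*frame);        // UpdateRxVadDetection(*audioFrame)
      frame->id = channel_id;    // audioFrame->id_ = VoEChannelId(audioFrame->id_)
      apm.ProcessStream(frame);  // rx_audioproc_->ProcessStream(audioFrame)
      return 0;
    }
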
output_mixer.h
67 AudioFrame* audioFrame);
channel.h
308 int UpdateRxVadDetection(AudioFrame& audioFrame);
401 int32_t GetAudioFrame(int32_t id, AudioFrame* audioFrame) override;
447 uint32_t Demultiplex(const AudioFrame& audioFrame);
483 int32_t MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency);
/external/webrtc/webrtc/modules/utility/source/
coder.cc
83 AudioFrame audioFrame; local
84 audioFrame.CopyFrom(audio);
85 audioFrame.timestamp_ = _encodeTimestamp;
86 _encodeTimestamp += static_cast<uint32_t>(audioFrame.samples_per_channel_);
91 if(_acm->Add10MsData((AudioFrame&)audioFrame) == -1)
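
The coder.cc snippet copies the input frame, stamps it with a running timestamp, and advances that timestamp by samples_per_channel_ for every 10 ms block handed to Add10MsData(). That bookkeeping, isolated into a small illustrative class:

    #include <cstddef>
    #include <cstdint>

    class EncodeTimestamper {
     public:
      // Returns the timestamp for the current block, then advances by the
      // block length; the 32-bit counter wraps naturally, like an RTP clock.
      uint32_t Stamp(size_t samples_per_channel) {
        const uint32_t ts = encode_timestamp_;
        encode_timestamp_ += static_cast<uint32_t>(samples_per_channel);
        return ts;
      }
     private:
      uint32_t encode_timestamp_ = 0;
    };
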
/external/webrtc/webrtc/modules/audio_conference_mixer/include/
audio_conference_mixer_defines.h
24 // The implementation of this function should update audioFrame with new
29 AudioFrame* audioFrame) = 0;
/external/webrtc/webrtc/modules/audio_processing/
audio_buffer.h
109 void DeinterleaveFrom(AudioFrame* audioFrame);
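
DeinterleaveFrom() refers to splitting an AudioFrame's interleaved samples (L R L R ... for stereo) into one contiguous buffer per channel for processing. A minimal sketch of that operation, not the actual AudioBuffer implementation:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void Deinterleave(const int16_t* interleaved, size_t samples_per_channel,
                      size_t num_channels,
                      std::vector<std::vector<int16_t>>* channels) {
      channels->assign(num_channels, std::vector<int16_t>(samples_per_channel));
      for (size_t ch = 0; ch < num_channels; ++ch)
        for (size_t i = 0; i < samples_per_channel; ++i)
          (*channels)[ch][i] = interleaved[i * num_channels + ch];
    }
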

Completed in 1396 milliseconds