/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudioServiceEndpointPlay"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <assert.h>
#include <map>
#include <mutex>
#include <vector>

#include <utils/Singleton.h>

#include "AAudioEndpointManager.h"
#include "AAudioServiceEndpoint.h"
#include "AAudioServiceEndpointPlay.h"
#include "AAudioServiceEndpointShared.h"
#include "AAudioServiceStreamShared.h"
#include "core/AudioStreamBuilder.h"

using namespace android; // TODO just import names needed
using namespace aaudio;  // TODO just import names needed

#define BURSTS_PER_BUFFER_DEFAULT   2

AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService &audioService)
        : mStreamInternalPlay(audioService, true) {
    mStreamInternal = &mStreamInternalPlay;
}

AAudioServiceEndpointPlay::~AAudioServiceEndpointPlay() {
}

aaudio_result_t AAudioServiceEndpointPlay::open(const aaudio::AAudioStreamRequest &request) {
    aaudio_result_t result = AAudioServiceEndpointShared::open(request);
    if (result == AAUDIO_OK) {
        mMixer.allocate(getStreamInternal()->getSamplesPerFrame(),
                        getStreamInternal()->getFramesPerBurst());

        int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
        if (burstsPerBuffer == 0) {
            mLatencyTuningEnabled = true;
            burstsPerBuffer = BURSTS_PER_BUFFER_DEFAULT;
        }
        int32_t desiredBufferSize = burstsPerBuffer * getStreamInternal()->getFramesPerBurst();
        getStreamInternal()->setBufferSize(desiredBufferSize);
    }
    return result;
}

// Mix data from each application stream and write result to the shared MMAP stream.
void *AAudioServiceEndpointPlay::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    int64_t timeoutNanos = getStreamInternal()->calculateReasonableTimeout();

    // result might be a frame count
    while (mCallbackEnabled.load() && getStreamInternal()->isActive() && (result >= 0)) {
        // Mix data from each active stream.
        mMixer.clear();

        { // brackets are for lock_guard
            int index = 0;
            int64_t mmapFramesWritten = getStreamInternal()->getFramesWritten();

            std::lock_guard<std::mutex> lock(mLockStreams);
            for (const auto &clientStream : mRegisteredStreams) {
                int64_t clientFramesRead = 0;

                if (!clientStream->isRunning()) {
                    continue;
                }

                sp<AAudioServiceStreamShared> streamShared =
                        static_cast<AAudioServiceStreamShared *>(clientStream.get());

                {
                    // Lock the AudioFifo to protect against close.
                    std::lock_guard<std::mutex> lock(streamShared->getAudioDataQueueLock());

                    FifoBuffer *fifo = streamShared->getAudioDataFifoBuffer_l();
                    if (fifo != nullptr) {

                        // Determine offset between framePosition in client's stream
                        // vs the underlying MMAP stream.
                        clientFramesRead = fifo->getReadCounter();
                        // These two indices refer to the same frame.
                        int64_t positionOffset = mmapFramesWritten - clientFramesRead;
                        streamShared->setTimestampPositionOffset(positionOffset);

                        float volume = 1.0; // to match legacy volume
                        bool underflowed = mMixer.mix(index, fifo, volume);
                        if (underflowed) {
                            streamShared->incrementXRunCount();
                        }
                        clientFramesRead = fifo->getReadCounter();
                    }
                }

                if (clientFramesRead > 0) {
                    // This timestamp represents the completion of data being read out of the
                    // client buffer. It is sent to the client and used in the timing model
                    // to decide when the client has room to write more data.
                    Timestamp timestamp(clientFramesRead, AudioClock::getNanoseconds());
                    streamShared->markTransferTime(timestamp);
                }

                index++; // just used for labelling tracks in systrace
            }
        }

        // Write mixer output to stream using a blocking write.
        result = getStreamInternal()->write(mMixer.getOutputBuffer(),
                                            getFramesPerBurst(), timeoutNanos);
        if (result == AAUDIO_ERROR_DISCONNECTED) {
            AAudioServiceEndpointShared::disconnectRegisteredStreams();
            break;
        } else if (result != getFramesPerBurst()) {
            ALOGW("AAudioServiceEndpoint(): callbackLoop() wrote %d / %d",
                  result, getFramesPerBurst());
            break;
        }
    }

    return NULL; // TODO review
}