AudioTrack.java revision ff0d9f098e51c54e1a030ed21fd980680cb7b405
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.media;
18
19import java.lang.annotation.Retention;
20import java.lang.annotation.RetentionPolicy;
21import java.lang.ref.WeakReference;
22import java.nio.ByteBuffer;
23import java.nio.NioUtils;
24
25import android.annotation.IntDef;
26import android.app.ActivityThread;
27import android.app.AppOpsManager;
28import android.content.Context;
29import android.os.Handler;
30import android.os.IBinder;
31import android.os.Looper;
32import android.os.Message;
33import android.os.Process;
34import android.os.RemoteException;
35import android.os.ServiceManager;
36import android.util.Log;
37
38import com.android.internal.app.IAppOpsService;
39
40
41/**
42 * The AudioTrack class manages and plays a single audio resource for Java applications.
43 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
44 * achieved by "pushing" the data to the AudioTrack object using one of the
45 *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
46 *  and {@link #write(float[], int, int, int)} methods.
47 *
48 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
49 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
50 * one of the {@code write()} methods. These are blocking and return when the data has been
51 * transferred from the Java layer to the native layer and queued for playback. The streaming
52 * mode is most useful when playing blocks of audio data that for instance are:
53 *
54 * <ul>
55 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
56 *   <li>too big to fit in memory because of the characteristics of the audio data
57 *         (high sampling rate, bits per sample ...)</li>
58 *   <li>received or generated while previously queued audio is playing.</li>
59 * </ul>
60 *
61 * The static mode should be chosen when dealing with short sounds that fit in memory and
62 * that need to be played with the smallest latency possible. The static mode will
63 * therefore be preferred for UI and game sounds that are played often, and with the
64 * smallest overhead possible.
65 *
66 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
67 * The size of this buffer, specified during the construction, determines how long an AudioTrack
68 * can play before running out of data.<br>
69 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
70 * be played from it.<br>
71 * For the streaming mode, data will be written to the audio sink in chunks of
72 * sizes less than or equal to the total buffer size.
73 *
74 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
75 */
76public class AudioTrack
77{
78    //---------------------------------------------------------
79    // Constants
80    //--------------------
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Minimum value for the source sample rate, in Hz. */
    private static final int SAMPLE_RATE_HZ_MIN = 4000;
    /** Maximum value for the source sample rate, in Hz. */
    private static final int SAMPLE_RATE_HZ_MAX = 48000;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    // Result codes mirror AudioSystem so native return values can be passed through.
    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used in {@link #write(ByteBuffer, int, int)}.
     */
    public final static int WRITE_BLOCKING = 0;
    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking, to be used in
     * {@link #write(ByteBuffer, int, int)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;
186
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of {@link #STATE_UNINITIALIZED}, {@link #STATE_INITIALIZED},
     * {@link #STATE_NO_STATIC_DATA}.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of {@link #PLAYSTATE_STOPPED}, {@link #PLAYSTATE_PAUSED},
     * {@link #PLAYSTATE_PLAYING}.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to make sure mPlayState updates are reflecting the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer: same quantity expressed in bytes and in frames.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativeEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     */
    private int mSampleRate; // initialized by all constructors
    /**
     * The number of audio output channels (1 is mono, 2 is stereo).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask.
     */
    private int mChannels = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;
    /**
     * The way audio is consumed by the audio sink, streaming or static
     * ({@link #MODE_STREAM} or {@link #MODE_STATIC}).
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current audio channel configuration as passed to the constructor.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
    /**
     * Audio session ID; assigned by the native layer during construction.
     */
    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
    /**
     * Reference to the app-ops service.
     */
    private final IAppOpsService mAppOps;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    private long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;
274
275
    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor. Delegates to the full constructor with
     * {@link AudioSystem#AUDIO_SESSION_ALLOCATE} so a new audio session is created.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback.
     *   If track's creation mode is {@link #MODE_STREAM}, you can write data into
     *   this buffer in chunks less than or equal to this size, and it is typical to use
     *   chunks of 1/2 of the total size to permit double-buffering.
     *   If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException if any parameter is rejected by the
     *   delegated constructor's validation.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
    }
312
    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session
     * with all other media players or audio tracks in the same session, otherwise a new session
     * will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException if a parameter fails validation or the
     *   session ID is negative. Note a native initialization failure does NOT throw: the
     *   object is left in {@link #STATE_UNINITIALIZED} and must be checked via
     *   {@link #getState()}.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        // remember which looper is associated with the AudioTrack instantiation;
        // fall back to the main looper when instantiated from a thread without one
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }
        mInitializationLooper = looper;

        // validates and stores stream type, sample rate, channel config, format, and mode
        // (throws IllegalArgumentException on bad values)
        audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);

        // validates the buffer size against the frame size (throws on bad values)
        audioBuffSizeCheck(bufferSizeInBytes);

        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // the session ID is passed in and out through a one-element array:
        // the native layer may allocate a new session and write it back
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this),
                mStreamType, mSampleRate, mChannels, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSessionId = session[0];

        if (mDataLoadMode == MODE_STATIC) {
            // a static track is not usable until the client has written its static data
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }
390
    // mask of all the channels supported by this implementation; used by
    // isMultichannelConfigSupported() and getMinBufferSize() to reject channel
    // masks containing any other bits
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER;
400
401    // Convenience method for the constructor's parameter checks.
402    // This is where constructor IllegalArgumentException-s are thrown
403    // postconditions:
404    //    mStreamType is valid
405    //    mChannelCount is valid
406    //    mChannels is valid
407    //    mAudioFormat is valid
408    //    mSampleRate is valid
409    //    mDataLoadMode is valid
410    private void audioParamCheck(int streamType, int sampleRateInHz,
411                                 int channelConfig, int audioFormat, int mode) {
412
413        //--------------
414        // stream type
415        if( (streamType != AudioManager.STREAM_ALARM) && (streamType != AudioManager.STREAM_MUSIC)
416           && (streamType != AudioManager.STREAM_RING) && (streamType != AudioManager.STREAM_SYSTEM)
417           && (streamType != AudioManager.STREAM_VOICE_CALL)
418           && (streamType != AudioManager.STREAM_NOTIFICATION)
419           && (streamType != AudioManager.STREAM_BLUETOOTH_SCO)
420           && (streamType != AudioManager.STREAM_DTMF)) {
421            throw new IllegalArgumentException("Invalid stream type.");
422        }
423        mStreamType = streamType;
424
425        //--------------
426        // sample rate, note these values are subject to change
427        if ( (sampleRateInHz < 4000) || (sampleRateInHz > 48000) ) {
428            throw new IllegalArgumentException(sampleRateInHz
429                    + "Hz is not a supported sample rate.");
430        }
431        mSampleRate = sampleRateInHz;
432
433        //--------------
434        // channel config
435        mChannelConfiguration = channelConfig;
436
437        switch (channelConfig) {
438        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
439        case AudioFormat.CHANNEL_OUT_MONO:
440        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
441            mChannelCount = 1;
442            mChannels = AudioFormat.CHANNEL_OUT_MONO;
443            break;
444        case AudioFormat.CHANNEL_OUT_STEREO:
445        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
446            mChannelCount = 2;
447            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
448            break;
449        default:
450            if (!isMultichannelConfigSupported(channelConfig)) {
451                // input channel configuration features unsupported channels
452                throw new IllegalArgumentException("Unsupported channel configuration.");
453            }
454            mChannels = channelConfig;
455            mChannelCount = Integer.bitCount(channelConfig);
456        }
457
458        //--------------
459        // audio format
460        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
461            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
462        }
463
464        if (!AudioFormat.isValidEncoding(audioFormat)) {
465            throw new IllegalArgumentException("Unsupported audio encoding.");
466        }
467        mAudioFormat = audioFormat;
468
469        //--------------
470        // audio load mode
471        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
472                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
473            throw new IllegalArgumentException("Invalid mode.");
474        }
475        mDataLoadMode = mode;
476    }
477
478    /**
479     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
480     * @param channelConfig the mask to validate
481     * @return false if the AudioTrack can't be used with such a mask
482     */
483    private static boolean isMultichannelConfigSupported(int channelConfig) {
484        // check for unsupported channels
485        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
486            loge("Channel configuration features unsupported channels");
487            return false;
488        }
489        // check for unsupported multichannel combinations:
490        // - FL/FR must be present
491        // - L/R channels must be paired (e.g. no single L channel)
492        final int frontPair =
493                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
494        if ((channelConfig & frontPair) != frontPair) {
495                loge("Front channels must be present in multichannel configurations");
496                return false;
497        }
498        final int backPair =
499                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
500        if ((channelConfig & backPair) != 0) {
501            if ((channelConfig & backPair) != backPair) {
502                loge("Rear channels can't be used independently");
503                return false;
504            }
505        }
506        return true;
507    }
508
509
510    // Convenience method for the constructor's audio buffer size check.
511    // preconditions:
512    //    mChannelCount is valid
513    //    mAudioFormat is valid
514    // postcondition:
515    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
516    private void audioBuffSizeCheck(int audioBufferSize) {
517        // NB: this section is only valid with PCM data.
518        //     To update when supporting compressed formats
519        int frameSizeInBytes;
520        if (AudioFormat.isEncodingLinearPcm(mAudioFormat)) {
521            frameSizeInBytes = mChannelCount
522                    * (AudioFormat.getBytesPerSample(mAudioFormat));
523        } else {
524            frameSizeInBytes = 1;
525        }
526        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
527            throw new IllegalArgumentException("Invalid audio buffer size.");
528        }
529
530        mNativeBufferSizeInBytes = audioBufferSize;
531        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
532    }
533
534
    /**
     * Releases the native AudioTrack resources.
     * After this call the track returns to {@link #STATE_UNINITIALIZED} and can no
     * longer be used for playback.
     */
    public void release() {
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch(IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        native_release();
        mState = STATE_UNINITIALIZED;
    }
549
550    @Override
551    protected void finalize() {
552        native_finalize();
553    }
554
    //--------------------------------------------------------------------------
    // Getters
    //--------------------
    /**
     * Returns the minimum gain value, which is the constant 0.0.
     * Gain values less than 0.0 will be clamped to 0.0.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @return the minimum value, which is the constant 0.0.
     */
    static public float getMinVolume() {
        return GAIN_MIN;
    }

    /**
     * Returns the maximum gain value, which is greater than or equal to 1.0.
     * Gain values greater than the maximum will be clamped to the maximum.
     * <p>The word "volume" in the API name is historical; this is actually a gain
     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
     * corresponds to a gain of 0 dB (sample values left unmodified).
     * @return the maximum value, which is greater than or equal to 1.0.
     */
    static public float getMaxVolume() {
        return GAIN_MAX;
    }

    /**
     * Returns the configured audio data sample rate in Hz.
     * This is the source rate set at construction, not the current playback rate
     * (see {@link #getPlaybackRate()}).
     */
    public int getSampleRate() {
        return mSampleRate;
    }

    /**
     * Returns the current playback rate in Hz, as reported by the native layer.
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }

    /**
     * Returns the configured audio data format. See {@link AudioFormat#ENCODING_PCM_16BIT}
     * and {@link AudioFormat#ENCODING_PCM_8BIT}.
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }
601
    /**
     * Returns the type of audio stream this AudioTrack is configured for.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
     */
    public int getStreamType() {
        return mStreamType;
    }

    /**
     * Returns the configured channel configuration (channel mask) as passed to the
     * constructor. See {@link AudioFormat#CHANNEL_OUT_MONO}
     * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }

    /**
     * Returns the configured number of channels.
     */
    public int getChannelCount() {
        return mChannelCount;
    }

    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     * @see #STATE_UNINITIALIZED
     */
    public int getState() {
        return mState;
    }

    /**
     * Returns the playback state of the AudioTrack instance.
     * The read is performed under mPlayStateLock so it reflects the most recent
     * state transition.
     * @see #PLAYSTATE_STOPPED
     * @see #PLAYSTATE_PAUSED
     * @see #PLAYSTATE_PLAYING
     */
    public int getPlayState() {
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }
652
    /**
     *  Returns the "native frame count", derived from the bufferSizeInBytes specified at
     *  creation time and converted to frame units.
     *  If track's creation mode is {@link #MODE_STATIC},
     *  it is equal to the specified bufferSizeInBytes converted to frame units.
     *  If track's creation mode is {@link #MODE_STREAM},
     *  it is typically greater than or equal to the specified bufferSizeInBytes converted to frame
     *  units; it may be rounded up to a larger value if needed by the target device implementation.
     *  @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     *  See {@link AudioManager#getProperty(String)} for key
     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     */
    @Deprecated
    protected int getNativeFrameCount() {
        return native_get_native_frame_count();
    }

    /**
     * Returns marker position expressed in frames, as reported by the native layer.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }

    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }

    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by flush(), reload(), and stop().
     */
    public int getPlaybackHeadPosition() {
        return native_get_position();
    }

    /**
     * Returns this track's estimated latency in milliseconds. This includes the latency due
     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
     *
     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
     * a better solution.
     * @hide
     */
    public int getLatency() {
        return native_get_latency();
    }

    /**
     *  Returns the output sample rate in Hz for the specified stream type.
     */
    static public int getNativeOutputSampleRate(int streamType) {
        return native_get_output_sample_rate(streamType);
    }
717
718    /**
719     * Returns the minimum buffer size required for the successful creation of an AudioTrack
720     * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
721     * guarantee a smooth playback under load, and higher values should be chosen according to
722     * the expected frequency at which the buffer will be refilled with additional data to play.
723     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
724     * to a higher value than the initial source sample rate, be sure to configure the buffer size
725     * based on the highest planned sample rate.
726     * @param sampleRateInHz the source sample rate expressed in Hz.
727     * @param channelConfig describes the configuration of the audio channels.
728     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
729     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
730     * @param audioFormat the format in which the audio data is represented.
731     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
732     *   {@link AudioFormat#ENCODING_PCM_8BIT},
733     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
734     * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
735     *   or {@link #ERROR} if unable to query for output properties,
736     *   or the minimum buffer size expressed in bytes.
737     */
738    static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
739        int channelCount = 0;
740        switch(channelConfig) {
741        case AudioFormat.CHANNEL_OUT_MONO:
742        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
743            channelCount = 1;
744            break;
745        case AudioFormat.CHANNEL_OUT_STEREO:
746        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
747            channelCount = 2;
748            break;
749        default:
750            if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
751                // input channel configuration features unsupported channels
752                loge("getMinBufferSize(): Invalid channel configuration.");
753                return ERROR_BAD_VALUE;
754            } else {
755                channelCount = Integer.bitCount(channelConfig);
756            }
757        }
758
759        if (!AudioFormat.isValidEncoding(audioFormat)) {
760            loge("getMinBufferSize(): Invalid audio format.");
761            return ERROR_BAD_VALUE;
762        }
763
764        // sample rate, note these values are subject to change
765        if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
766            loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
767            return ERROR_BAD_VALUE;
768        }
769
770        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
771        if (size <= 0) {
772            loge("getMinBufferSize(): error querying hardware");
773            return ERROR;
774        }
775        else {
776            return size;
777        }
778    }
779
780    /**
781     * Returns the audio session ID.
782     *
783     * @return the ID of the audio session this AudioTrack belongs to.
784     */
785    public int getAudioSessionId() {
786        return mSessionId;
787    }
788
789   /**
790    * Poll for a timestamp on demand.
791    * <p>
792    * If you need to track timestamps during initial warmup or after a routing or mode change,
793    * you should request a new timestamp once per second until the reported timestamps
794    * show that the audio clock is stable.
795    * Thereafter, query for a new timestamp approximately once every 10 seconds to once per minute.
796    * Calling this method more often is inefficient.
797    * It is also counter-productive to call this method more often than recommended,
798    * because the short-term differences between successive timestamp reports are not meaningful.
799    * If you need a high-resolution mapping between frame position and presentation time,
800    * consider implementing that at application level, based on low-resolution timestamps.
801    * <p>
802    * The audio data at the returned position may either already have been
803    * presented, or may have not yet been presented but is committed to be presented.
804    * It is not possible to request the time corresponding to a particular position,
805    * or to request the (fractional) position corresponding to a particular time.
806    * If you need such features, consider implementing them at application level.
807    *
808    * @param timestamp a reference to a non-null AudioTimestamp instance allocated
809    *        and owned by caller.
810    * @return true if a timestamp is available, or false if no timestamp is available.
811    *         If a timestamp if available,
812    *         the AudioTimestamp instance is filled in with a position in frame units, together
813    *         with the estimated time when that frame was presented or is committed to
814    *         be presented.
815    *         In the case that no timestamp is available, any supplied instance is left unaltered.
816    *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
817    *         or during and immediately after a route change.
818    */
819    // Add this text when the "on new timestamp" API is added:
820    //   Use if you need to get the most recent timestamp outside of the event callback handler.
821    public boolean getTimestamp(AudioTimestamp timestamp)
822    {
823        if (timestamp == null) {
824            throw new IllegalArgumentException();
825        }
826        // It's unfortunate, but we have to either create garbage every time or use synchronized
827        long[] longArray = new long[2];
828        int ret = native_get_timestamp(longArray);
829        if (ret != SUCCESS) {
830            return false;
831        }
832        timestamp.framePosition = longArray[0];
833        timestamp.nanoTime = longArray[1];
834        return true;
835    }
836
837
838    //--------------------------------------------------------------------------
839    // Initialization / configuration
840    //--------------------
841    /**
842     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
843     * for each periodic playback head position update.
844     * Notifications will be received in the same thread as the one in which the AudioTrack
845     * instance was created.
846     * @param listener
847     */
848    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
849        setPlaybackPositionUpdateListener(listener, null);
850    }
851
852    /**
853     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
854     * for each periodic playback head position update.
855     * Use this method to receive AudioTrack events in the Handler associated with another
856     * thread than the one in which you created the AudioTrack instance.
857     * @param listener
858     * @param handler the Handler that will receive the event notification messages.
859     */
860    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
861                                                    Handler handler) {
862        if (listener != null) {
863            mEventHandlerDelegate = new NativeEventHandlerDelegate(this, listener, handler);
864        } else {
865            mEventHandlerDelegate = null;
866        }
867    }
868
869
870    private static float clampGainOrLevel(float gainOrLevel) {
871        if (Float.isNaN(gainOrLevel)) {
872            throw new IllegalArgumentException();
873        }
874        if (gainOrLevel < GAIN_MIN) {
875            gainOrLevel = GAIN_MIN;
876        } else if (gainOrLevel > GAIN_MAX) {
877            gainOrLevel = GAIN_MAX;
878        }
879        return gainOrLevel;
880    }
881
882
883     /**
884     * Sets the specified left and right output gain values on the AudioTrack.
885     * <p>Gain values are clamped to the closed interval [0.0, max] where
886     * max is the value of {@link #getMaxVolume}.
887     * A value of 0.0 results in zero gain (silence), and
888     * a value of 1.0 means unity gain (signal unchanged).
889     * The default value is 1.0 meaning unity gain.
890     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
891     * @param leftGain output gain for the left channel.
892     * @param rightGain output gain for the right channel
893     * @return error code or success, see {@link #SUCCESS},
894     *    {@link #ERROR_INVALID_OPERATION}
895     * @deprecated Applications should use {@link #setVolume} instead, as it
896     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
897     */
898    public int setStereoVolume(float leftGain, float rightGain) {
899        if (isRestricted()) {
900            return SUCCESS;
901        }
902        if (mState == STATE_UNINITIALIZED) {
903            return ERROR_INVALID_OPERATION;
904        }
905
906        leftGain = clampGainOrLevel(leftGain);
907        rightGain = clampGainOrLevel(rightGain);
908
909        native_setVolume(leftGain, rightGain);
910
911        return SUCCESS;
912    }
913
914
915    /**
916     * Sets the specified output gain value on all channels of this track.
917     * <p>Gain values are clamped to the closed interval [0.0, max] where
918     * max is the value of {@link #getMaxVolume}.
919     * A value of 0.0 results in zero gain (silence), and
920     * a value of 1.0 means unity gain (signal unchanged).
921     * The default value is 1.0 meaning unity gain.
922     * <p>This API is preferred over {@link #setStereoVolume}, as it
923     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
924     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
925     * @param gain output gain for all channels.
926     * @return error code or success, see {@link #SUCCESS},
927     *    {@link #ERROR_INVALID_OPERATION}
928     */
929    public int setVolume(float gain) {
930        return setStereoVolume(gain, gain);
931    }
932
933
934    /**
935     * Sets the playback sample rate for this track. This sets the sampling rate at which
936     * the audio data will be consumed and played back
937     * (as set by the sampleRateInHz parameter in the
938     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
939     * not the original sampling rate of the
940     * content. For example, setting it to half the sample rate of the content will cause the
941     * playback to last twice as long, but will also result in a pitch shift down by one octave.
942     * The valid sample rate range is from 1 Hz to twice the value returned by
943     * {@link #getNativeOutputSampleRate(int)}.
944     * @param sampleRateInHz the sample rate expressed in Hz
945     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
946     *    {@link #ERROR_INVALID_OPERATION}
947     */
948    public int setPlaybackRate(int sampleRateInHz) {
949        if (mState != STATE_INITIALIZED) {
950            return ERROR_INVALID_OPERATION;
951        }
952        if (sampleRateInHz <= 0) {
953            return ERROR_BAD_VALUE;
954        }
955        return native_set_playback_rate(sampleRateInHz);
956    }
957
958
959    /**
960     * Sets the position of the notification marker.  At most one marker can be active.
961     * @param markerInFrames marker position in wrapping frame units similar to
962     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
963     * To set a marker at a position which would appear as zero due to wraparound,
964     * a workaround is to use a non-zero position near zero, such as -1 or 1.
965     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
966     *  {@link #ERROR_INVALID_OPERATION}
967     */
968    public int setNotificationMarkerPosition(int markerInFrames) {
969        if (mState == STATE_UNINITIALIZED) {
970            return ERROR_INVALID_OPERATION;
971        }
972        return native_set_marker_pos(markerInFrames);
973    }
974
975
976    /**
977     * Sets the period for the periodic notification event.
978     * @param periodInFrames update period expressed in frames
979     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
980     */
981    public int setPositionNotificationPeriod(int periodInFrames) {
982        if (mState == STATE_UNINITIALIZED) {
983            return ERROR_INVALID_OPERATION;
984        }
985        return native_set_pos_update_period(periodInFrames);
986    }
987
988
989    /**
990     * Sets the playback head position.
991     * The track must be stopped or paused for the position to be changed,
992     * and must use the {@link #MODE_STATIC} mode.
993     * @param positionInFrames playback head position expressed in frames
994     * Zero corresponds to start of buffer.
995     * The position must not be greater than the buffer size in frames, or negative.
996     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
997     *    {@link #ERROR_INVALID_OPERATION}
998     */
999    public int setPlaybackHeadPosition(int positionInFrames) {
1000        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1001                getPlayState() == PLAYSTATE_PLAYING) {
1002            return ERROR_INVALID_OPERATION;
1003        }
1004        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
1005            return ERROR_BAD_VALUE;
1006        }
1007        return native_set_position(positionInFrames);
1008    }
1009
1010    /**
1011     * Sets the loop points and the loop count. The loop can be infinite.
1012     * Similarly to setPlaybackHeadPosition,
1013     * the track must be stopped or paused for the loop points to be changed,
1014     * and must use the {@link #MODE_STATIC} mode.
1015     * @param startInFrames loop start marker expressed in frames
1016     * Zero corresponds to start of buffer.
1017     * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1018     * @param endInFrames loop end marker expressed in frames
1019     * The total buffer size in frames corresponds to end of buffer.
1020     * The end marker must not be greater than the buffer size in frames.
1021     * For looping, the end marker must not be less than or equal to the start marker,
1022     * but to disable looping
1023     * it is permitted for start marker, end marker, and loop count to all be 0.
1024     * @param loopCount the number of times the loop is looped.
1025     *    A value of -1 means infinite looping, and 0 disables looping.
1026     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1027     *    {@link #ERROR_INVALID_OPERATION}
1028     */
1029    public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1030        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1031                getPlayState() == PLAYSTATE_PLAYING) {
1032            return ERROR_INVALID_OPERATION;
1033        }
1034        if (loopCount == 0) {
1035            ;   // explicitly allowed as an exception to the loop region range check
1036        } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1037                startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1038            return ERROR_BAD_VALUE;
1039        }
1040        return native_set_loop(startInFrames, endInFrames, loopCount);
1041    }
1042
1043    /**
1044     * Sets the initialization state of the instance. This method was originally intended to be used
1045     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
1046     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
1047     * @param state the state of the AudioTrack instance
1048     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
1049     */
1050    @Deprecated
1051    protected void setState(int state) {
1052        mState = state;
1053    }
1054
1055
1056    //---------------------------------------------------------
1057    // Transport control methods
1058    //--------------------
1059    /**
1060     * Starts playing an AudioTrack.
1061     * If track's creation mode is {@link #MODE_STATIC}, you must have called write() prior.
1062     *
1063     * @throws IllegalStateException
1064     */
1065    public void play()
1066    throws IllegalStateException {
1067        if (mState != STATE_INITIALIZED) {
1068            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
1069        }
1070        if (isRestricted()) {
1071            setVolume(0);
1072        }
1073        synchronized(mPlayStateLock) {
1074            native_start();
1075            mPlayState = PLAYSTATE_PLAYING;
1076        }
1077    }
1078
    /**
     * Queries the app-ops service to determine whether audio playback for this
     * process and stream type is currently restricted.
     * @return true if playback should be restricted (muted); false if allowed,
     *         or if the app-ops service could not be reached.
     */
    private boolean isRestricted() {
        try {
            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, mStreamType,
                    Process.myUid(), ActivityThread.currentPackageName());
            return mode != AppOpsManager.MODE_ALLOWED;
        } catch (RemoteException e) {
            // Deliberately fail open: an unreachable service means "not restricted".
            return false;
        }
    }
1088
1089    /**
1090     * Stops playing the audio data.
1091     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
1092     * after the last buffer that was written has been played. For an immediate stop, use
1093     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
1094     * back yet.
1095     * @throws IllegalStateException
1096     */
1097    public void stop()
1098    throws IllegalStateException {
1099        if (mState != STATE_INITIALIZED) {
1100            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
1101        }
1102
1103        // stop playing
1104        synchronized(mPlayStateLock) {
1105            native_stop();
1106            mPlayState = PLAYSTATE_STOPPED;
1107        }
1108    }
1109
1110    /**
1111     * Pauses the playback of the audio data. Data that has not been played
1112     * back will not be discarded. Subsequent calls to {@link #play} will play
1113     * this data back. See {@link #flush()} to discard this data.
1114     *
1115     * @throws IllegalStateException
1116     */
1117    public void pause()
1118    throws IllegalStateException {
1119        if (mState != STATE_INITIALIZED) {
1120            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
1121        }
1122        //logd("pause()");
1123
1124        // pause playback
1125        synchronized(mPlayStateLock) {
1126            native_pause();
1127            mPlayState = PLAYSTATE_PAUSED;
1128        }
1129    }
1130
1131
1132    //---------------------------------------------------------
1133    // Audio data supply
1134    //--------------------
1135
1136    /**
1137     * Flushes the audio data currently queued for playback. Any data that has
1138     * not been played back will be discarded.  No-op if not stopped or paused,
1139     * or if the track's creation mode is not {@link #MODE_STREAM}.
1140     */
1141    public void flush() {
1142        if (mState == STATE_INITIALIZED) {
1143            // flush the data in native layer
1144            native_flush();
1145        }
1146
1147    }
1148
1149    /**
1150     * Writes the audio data to the audio sink for playback (streaming mode),
1151     * or copies audio data for later playback (static buffer mode).
1152     * In streaming mode, will block until all data has been written to the audio sink.
1153     * In static buffer mode, copies the data to the buffer starting at offset 0.
1154     * Note that the actual playback of this data might occur after this function
1155     * returns. This function is thread safe with respect to {@link #stop} calls,
1156     * in which case all of the specified data might not be written to the audio sink.
1157     *
1158     * @param audioData the array that holds the data to play.
1159     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
1160     *    starts.
1161     * @param sizeInBytes the number of bytes to read in audioData after the offset.
1162     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
1163     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1164     *    the parameters don't resolve to valid data and indexes, or
1165     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1166     *    needs to be recreated.
1167     */
1168
1169    public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
1170
1171        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1172            return ERROR_INVALID_OPERATION;
1173        }
1174
1175        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
1176                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
1177                || (offsetInBytes + sizeInBytes > audioData.length)) {
1178            return ERROR_BAD_VALUE;
1179        }
1180
1181        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
1182                true /*isBlocking*/);
1183
1184        if ((mDataLoadMode == MODE_STATIC)
1185                && (mState == STATE_NO_STATIC_DATA)
1186                && (ret > 0)) {
1187            // benign race with respect to other APIs that read mState
1188            mState = STATE_INITIALIZED;
1189        }
1190
1191        return ret;
1192    }
1193
1194
1195    /**
1196     * Writes the audio data to the audio sink for playback (streaming mode),
1197     * or copies audio data for later playback (static buffer mode).
1198     * In streaming mode, will block until all data has been written to the audio sink.
1199     * In static buffer mode, copies the data to the buffer starting at offset 0.
1200     * Note that the actual playback of this data might occur after this function
1201     * returns. This function is thread safe with respect to {@link #stop} calls,
1202     * in which case all of the specified data might not be written to the audio sink.
1203     *
1204     * @param audioData the array that holds the data to play.
1205     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1206     *     starts.
1207     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1208     * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
1209     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1210     *    the parameters don't resolve to valid data and indexes.
1211     */
1212
1213    public int write(short[] audioData, int offsetInShorts, int sizeInShorts) {
1214
1215        if (mState == STATE_UNINITIALIZED || mAudioFormat != AudioFormat.ENCODING_PCM_16BIT) {
1216            return ERROR_INVALID_OPERATION;
1217        }
1218
1219        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
1220                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
1221                || (offsetInShorts + sizeInShorts > audioData.length)) {
1222            return ERROR_BAD_VALUE;
1223        }
1224
1225        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat);
1226
1227        if ((mDataLoadMode == MODE_STATIC)
1228                && (mState == STATE_NO_STATIC_DATA)
1229                && (ret > 0)) {
1230            // benign race with respect to other APIs that read mState
1231            mState = STATE_INITIALIZED;
1232        }
1233
1234        return ret;
1235    }
1236
1237
1238    /**
1239     * Writes the audio data to the audio sink for playback (streaming mode),
1240     * or copies audio data for later playback (static buffer mode).
1241     * In static buffer mode, copies the data to the buffer starting at offset 0,
1242     * and the write mode is ignored.
1243     * In streaming mode, the blocking behavior will depend on the write mode.
1244     * <p>
1245     * Note that the actual playback of this data might occur after this function
1246     * returns. This function is thread safe with respect to {@link #stop} calls,
1247     * in which case all of the specified data might not be written to the audio sink.
1248     * <p>
1249     * @param audioData the array that holds the data to play.
1250     *     The implementation does not clip for sample values within the nominal range
1251     *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
1252     *     less than or equal to unity (1.0f), and in the absence of post-processing effects
1253     *     that could add energy, such as reverb.  For the convenience of applications
1254     *     that compute samples using filters with non-unity gain,
1255     *     sample values +3 dB beyond the nominal range are permitted.
1256     *     However such values may eventually be limited or clipped, depending on various gains
1257     *     and later processing in the audio path.  Therefore applications are encouraged
1258     *     to provide samples values within the nominal range.
1259     * @param offsetInFloats the offset, expressed as a number of floats,
1260     *     in audioData where the data to play starts.
1261     * @param sizeInFloats the number of floats to read in audioData after the offset.
1262     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1263     *     effect in static mode.
1264     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1265     *         to the audio sink.
1266     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1267     *     queuing as much audio data for playback as possible without blocking.
1268     * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION}
1269     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1270     *    the parameters don't resolve to valid data and indexes.
1271     */
1272    public int write(float[] audioData, int offsetInFloats, int sizeInFloats,
1273            @WriteMode int writeMode) {
1274
1275        if (mState == STATE_UNINITIALIZED) {
1276            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1277            return ERROR_INVALID_OPERATION;
1278        }
1279
1280        if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
1281            Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
1282            return ERROR_INVALID_OPERATION;
1283        }
1284
1285        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1286            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1287            return ERROR_BAD_VALUE;
1288        }
1289
1290        if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
1291                || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
1292                || (offsetInFloats + sizeInFloats > audioData.length)) {
1293            Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
1294            return ERROR_BAD_VALUE;
1295        }
1296
1297        int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
1298                writeMode == WRITE_BLOCKING);
1299
1300        if ((mDataLoadMode == MODE_STATIC)
1301                && (mState == STATE_NO_STATIC_DATA)
1302                && (ret > 0)) {
1303            // benign race with respect to other APIs that read mState
1304            mState = STATE_INITIALIZED;
1305        }
1306
1307        return ret;
1308    }
1309
1310
1311    /**
1312     * Writes the audio data to the audio sink for playback (streaming mode),
1313     * or copies audio data for later playback (static buffer mode).
1314     * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write
1315     * mode is ignored.
1316     * In streaming mode, the blocking behavior will depend on the write mode.
1317     * @param audioData the buffer that holds the data to play, starting at the position reported
1318     *     by <code>audioData.position()</code>.
1319     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
1320     *     have been advanced to reflect the amount of data that was successfully written to
1321     *     the AudioTrack.
1322     * @param sizeInBytes number of bytes to write.
1323     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
1324     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1325     *     effect in static mode.
1326     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1327     *         to the audio sink.
1328     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1329     *     queuing as much audio data for playback as possible without blocking.
1330     * @return 0 or a positive number of bytes that were written, or
1331     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1332     */
1333    public int write(ByteBuffer audioData, int sizeInBytes,
1334            @WriteMode int writeMode) {
1335
1336        if (mState == STATE_UNINITIALIZED) {
1337            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1338            return ERROR_INVALID_OPERATION;
1339        }
1340
1341        if (mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1342            Log.e(TAG, "AudioTrack.write(ByteBuffer ...) not yet supported for ENCODING_PCM_FLOAT");
1343            return ERROR_INVALID_OPERATION;
1344        }
1345
1346        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1347            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1348            return ERROR_BAD_VALUE;
1349        }
1350
1351        if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
1352            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
1353            return ERROR_BAD_VALUE;
1354        }
1355
1356        int ret = 0;
1357        if (audioData.isDirect()) {
1358            ret = native_write_native_bytes(audioData,
1359                    audioData.position(), sizeInBytes, mAudioFormat,
1360                    writeMode == WRITE_BLOCKING);
1361        } else {
1362            ret = native_write_byte(NioUtils.unsafeArray(audioData),
1363                    NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
1364                    sizeInBytes, mAudioFormat,
1365                    writeMode == WRITE_BLOCKING);
1366        }
1367
1368        if ((mDataLoadMode == MODE_STATIC)
1369                && (mState == STATE_NO_STATIC_DATA)
1370                && (ret > 0)) {
1371            // benign race with respect to other APIs that read mState
1372            mState = STATE_INITIALIZED;
1373        }
1374
1375        if (ret > 0) {
1376            audioData.position(audioData.position() + ret);
1377        }
1378
1379        return ret;
1380    }
1381
1382    /**
1383     * Notifies the native resource to reuse the audio data already loaded in the native
1384     * layer, that is to rewind to start of buffer.
1385     * The track's creation mode must be {@link #MODE_STATIC}.
1386     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1387     *  {@link #ERROR_INVALID_OPERATION}
1388     */
1389    public int reloadStaticData() {
1390        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
1391            return ERROR_INVALID_OPERATION;
1392        }
1393        return native_reload_static();
1394    }
1395
1396    //--------------------------------------------------------------------------
1397    // Audio effects management
1398    //--------------------
1399
1400    /**
1401     * Attaches an auxiliary effect to the audio track. A typical auxiliary
1402     * effect is a reverberation effect which can be applied on any sound source
1403     * that directs a certain amount of its energy to this effect. This amount
1404     * is defined by setAuxEffectSendLevel().
1405     * {@see #setAuxEffectSendLevel(float)}.
1406     * <p>After creating an auxiliary effect (e.g.
1407     * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
1408     * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
1409     * this method to attach the audio track to the effect.
1410     * <p>To detach the effect from the audio track, call this method with a
1411     * null effect id.
1412     *
1413     * @param effectId system wide unique id of the effect to attach
1414     * @return error code or success, see {@link #SUCCESS},
1415     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
1416     */
1417    public int attachAuxEffect(int effectId) {
1418        if (mState == STATE_UNINITIALIZED) {
1419            return ERROR_INVALID_OPERATION;
1420        }
1421        return native_attachAuxEffect(effectId);
1422    }
1423
1424    /**
1425     * Sets the send level of the audio track to the attached auxiliary effect
1426     * {@link #attachAuxEffect(int)}.  Effect levels
1427     * are clamped to the closed interval [0.0, max] where
1428     * max is the value of {@link #getMaxVolume}.
1429     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
1430     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
1431     * this method must be called for the effect to be applied.
1432     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
1433     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
1434     * so an appropriate conversion from linear UI input x to level is:
1435     * x == 0 -&gt; level = 0
1436     * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
1437     *
1438     * @param level linear send level
1439     * @return error code or success, see {@link #SUCCESS},
1440     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
1441     */
1442    public int setAuxEffectSendLevel(float level) {
1443        if (isRestricted()) {
1444            return SUCCESS;
1445        }
1446        if (mState == STATE_UNINITIALIZED) {
1447            return ERROR_INVALID_OPERATION;
1448        }
1449        level = clampGainOrLevel(level);
1450        int err = native_setAuxEffectSendLevel(level);
1451        return err == 0 ? SUCCESS : ERROR;
1452    }
1453
1454    //---------------------------------------------------------
1455    // Interface definitions
1456    //--------------------
1457    /**
1458     * Interface definition for a callback to be invoked when the playback head position of
1459     * an AudioTrack has reached a notification marker or has increased by a certain period.
1460     */
1461    public interface OnPlaybackPositionUpdateListener  {
1462        /**
1463         * Called on the listener to notify it that the previously set marker has been reached
1464         * by the playback head.
1465         */
1466        void onMarkerReached(AudioTrack track);
1467
1468        /**
1469         * Called on the listener to periodically notify it that the playback head has reached
1470         * a multiple of the notification period.
1471         */
1472        void onPeriodicNotification(AudioTrack track);
1473    }
1474
1475    //---------------------------------------------------------
1476    // Inner classes
1477    //--------------------
1478    /**
1479     * Helper class to handle the forwarding of native events to the appropriate listener
1480     * (potentially) handled in a different thread
1481     */
1482    private class NativeEventHandlerDelegate {
1483        private final Handler mHandler;
1484
1485        NativeEventHandlerDelegate(final AudioTrack track,
1486                                   final OnPlaybackPositionUpdateListener listener,
1487                                   Handler handler) {
1488            // find the looper for our new event handler
1489            Looper looper;
1490            if (handler != null) {
1491                looper = handler.getLooper();
1492            } else {
1493                // no given handler, use the looper the AudioTrack was created in
1494                looper = mInitializationLooper;
1495            }
1496
1497            // construct the event handler with this looper
1498            if (looper != null) {
1499                // implement the event handler delegate
1500                mHandler = new Handler(looper) {
1501                    @Override
1502                    public void handleMessage(Message msg) {
1503                        if (track == null) {
1504                            return;
1505                        }
1506                        switch(msg.what) {
1507                        case NATIVE_EVENT_MARKER:
1508                            if (listener != null) {
1509                                listener.onMarkerReached(track);
1510                            }
1511                            break;
1512                        case NATIVE_EVENT_NEW_POS:
1513                            if (listener != null) {
1514                                listener.onPeriodicNotification(track);
1515                            }
1516                            break;
1517                        default:
1518                            loge("Unknown native event type: " + msg.what);
1519                            break;
1520                        }
1521                    }
1522                };
1523            } else {
1524                mHandler = null;
1525            }
1526        }
1527
1528        Handler getHandler() {
1529            return mHandler;
1530        }
1531    }
1532
1533
1534    //---------------------------------------------------------
1535    // Java methods called from the native side
1536    //--------------------
1537    @SuppressWarnings("unused")
1538    private static void postEventFromNative(Object audiotrack_ref,
1539            int what, int arg1, int arg2, Object obj) {
1540        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
1541        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
1542        if (track == null) {
1543            return;
1544        }
1545
1546        NativeEventHandlerDelegate delegate = track.mEventHandlerDelegate;
1547        if (delegate != null) {
1548            Handler handler = delegate.getHandler();
1549            if (handler != null) {
1550                Message m = handler.obtainMessage(what, arg1, arg2, obj);
1551                handler.sendMessage(m);
1552            }
1553        }
1554
1555    }
1556
1557
1558    //---------------------------------------------------------
1559    // Native methods called from the Java side
1560    //--------------------
1561
    // Creates the native AudioTrack peer; on success sessionId[0] receives the
    // audio session id (presumably 0 == success — TODO confirm against the JNI layer).
    private native final int native_setup(Object audiotrack_this,
            int streamType, int sampleRate, int channelMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    // Releases native resources when this object is finalized.
    private native final void native_finalize();

    // Explicitly releases the native AudioTrack peer.
    private native final void native_release();

    // Transport controls, mirroring play()/stop()/pause()/flush().
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Writes PCM data from a Java array; returns bytes written or a negative error.
    // isBlocking selects between WRITE_BLOCKING and WRITE_NON_BLOCKING semantics.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    // NOTE(review): unlike the byte/float variants, the short variant takes no
    // blocking flag — verify against the JNI implementation before relying on it.
    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    // Writes from a direct ByteBuffer starting at positionInBytes.
    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    // Rewinds a MODE_STATIC track to the start of its buffer; see reloadStaticData().
    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback rate in Hz (sample-rate based API).
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    // Marker / periodic-notification positions, in frames.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    // Playback head position, in frames.
    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not need a constructed track.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary-effect plumbing; see attachAuxEffect()/setAuxEffectSendLevel().
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);
1625
1626    //---------------------------------------------------------
1627    // Utility methods
1628    //------------------
1629
    /** Logs a debug message under this class's log tag. */
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
1633
    /** Logs an error message under this class's log tag. */
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
1637
1638}
1639