AudioTrack.java revision 486918b41584cd2589c46c5217b3fba1e7d18874
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.media;
18
19import java.lang.annotation.Retention;
20import java.lang.annotation.RetentionPolicy;
21import java.lang.ref.WeakReference;
22import java.nio.ByteBuffer;
23import java.nio.NioUtils;
24
25import android.annotation.IntDef;
26import android.app.ActivityThread;
27import android.app.AppOpsManager;
28import android.content.Context;
29import android.os.Handler;
30import android.os.IBinder;
31import android.os.Looper;
32import android.os.Message;
33import android.os.Process;
34import android.os.RemoteException;
35import android.os.ServiceManager;
36import android.util.Log;
37
38import com.android.internal.app.IAppOpsService;
39
40
41/**
42 * The AudioTrack class manages and plays a single audio resource for Java applications.
43 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
44 * achieved by "pushing" the data to the AudioTrack object using one of the
45 *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
46 *  and {@link #write(float[], int, int, int)} methods.
47 *
48 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
49 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
50 * one of the {@code write()} methods. These are blocking and return when the data has been
51 * transferred from the Java layer to the native layer and queued for playback. The streaming
52 * mode is most useful when playing blocks of audio data that for instance are:
53 *
54 * <ul>
55 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
56 *   <li>too big to fit in memory because of the characteristics of the audio data
57 *         (high sampling rate, bits per sample ...)</li>
58 *   <li>received or generated while previously queued audio is playing.</li>
59 * </ul>
60 *
61 * The static mode should be chosen when dealing with short sounds that fit in memory and
62 * that need to be played with the smallest latency possible. The static mode will
63 * therefore be preferred for UI and game sounds that are played often, and with the
64 * smallest overhead possible.
65 *
66 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
67 * The size of this buffer, specified during the construction, determines how long an AudioTrack
68 * can play before running out of data.<br>
69 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
70 * be played from it.<br>
71 * For the streaming mode, data will be written to the audio sink in chunks of
72 * sizes less than or equal to the total buffer size.
73 *
74 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
75 */
76public class AudioTrack
77{
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Minimum value for sample rate, in Hz. */
    private static final int SAMPLE_RATE_HZ_MIN = 4000;
    /** Maximum value for sample rate, in Hz. */
    private static final int SAMPLE_RATE_HZ_MAX = 48000;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    // Tag used for all logging from this class.
    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used in {@link #write(ByteBuffer, int, int)}
     */
    public final static int WRITE_BLOCKING = 0;
    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking, to be used in
     * {@link #write(ByteBuffer, int, int)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;
186
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to make sure mPlayState updates are reflecting the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer.
     * Set by audioBuffSizeCheck(); frames = bytes / (channel count * bytes per sample).
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativeEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     */
    private int mSampleRate; // initialized by all constructors
    /**
     * The number of audio output channels (1 is mono, 2 is stereo).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask.
     */
    private int mChannels = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;
    /**
     * The way audio is consumed by the audio sink, streaming or static.
     * One of MODE_STREAM, MODE_STATIC.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current audio channel configuration (channel mask as passed to the constructor).
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
    /**
     * Audio session ID; assigned by the native layer during construction.
     */
    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
    /**
     * Reference to the app-ops service.
     */
    private final IAppOpsService mAppOps;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    private long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;
274
275
276    //--------------------------------------------------------------------------
277    // Constructor, Finalize
278    //--------------------
    /**
     * Class constructor.
     * Delegates to the session-aware constructor, letting the native layer allocate
     * a new audio session ID for this track.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback.
     *   If track's creation mode is {@link #MODE_STREAM}, you can write data into
     *   this buffer in chunks less than or equal to this size, and it is typical to use
     *   chunks of 1/2 of the total size to permit double-buffering.
     *   If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException if any argument fails validation.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
    }
312
313    /**
314     * Class constructor with audio session. Use this constructor when the AudioTrack must be
315     * attached to a particular audio session. The primary use of the audio session ID is to
316     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
317     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
318     * and media players in the same session and not to the output mix.
319     * When an AudioTrack is created without specifying a session, it will create its own session
320     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
321     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
322     * session
323     * with all other media players or audio tracks in the same session, otherwise a new session
324     * will be created for this track if none is supplied.
325     * @param streamType the type of the audio stream. See
326     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
327     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
328     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
329     * @param sampleRateInHz the initial source sample rate expressed in Hz.
330     * @param channelConfig describes the configuration of the audio channels.
331     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
332     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
333     * @param audioFormat the format in which the audio data is represented.
334     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
335     *   {@link AudioFormat#ENCODING_PCM_8BIT},
336     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
337     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
338     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
339     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
340     *   this is the maximum size of the sound that will be played for this instance.
341     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
342     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
343     *   smaller than getMinBufferSize() will result in an initialization failure.
344     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
345     * @param sessionId Id of audio session the AudioTrack must be attached to
346     * @throws java.lang.IllegalArgumentException
347     */
348    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
349            int bufferSizeInBytes, int mode, int sessionId)
350    throws IllegalArgumentException {
351        // mState already == STATE_UNINITIALIZED
352
353        // remember which looper is associated with the AudioTrack instantiation
354        Looper looper;
355        if ((looper = Looper.myLooper()) == null) {
356            looper = Looper.getMainLooper();
357        }
358        mInitializationLooper = looper;
359
360        audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);
361
362        audioBuffSizeCheck(bufferSizeInBytes);
363
364        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
365        mAppOps = IAppOpsService.Stub.asInterface(b);
366
367        if (sessionId < 0) {
368            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
369        }
370
371        int[] session = new int[1];
372        session[0] = sessionId;
373        // native initialization
374        int initResult = native_setup(new WeakReference<AudioTrack>(this),
375                mStreamType, mSampleRate, mChannels, mAudioFormat,
376                mNativeBufferSizeInBytes, mDataLoadMode, session);
377        if (initResult != SUCCESS) {
378            loge("Error code "+initResult+" when initializing AudioTrack.");
379            return; // with mState == STATE_UNINITIALIZED
380        }
381
382        mSessionId = session[0];
383
384        if (mDataLoadMode == MODE_STATIC) {
385            mState = STATE_NO_STATIC_DATA;
386        } else {
387            mState = STATE_INITIALIZED;
388        }
389    }
390
    // mask of all the channels supported by this implementation;
    // any channel configuration containing bits outside this mask is rejected.
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER;
400
401    // Convenience method for the constructor's parameter checks.
402    // This is where constructor IllegalArgumentException-s are thrown
403    // postconditions:
404    //    mStreamType is valid
405    //    mChannelCount is valid
406    //    mChannels is valid
407    //    mAudioFormat is valid
408    //    mSampleRate is valid
409    //    mDataLoadMode is valid
410    private void audioParamCheck(int streamType, int sampleRateInHz,
411                                 int channelConfig, int audioFormat, int mode) {
412
413        //--------------
414        // stream type
415        if( (streamType != AudioManager.STREAM_ALARM) && (streamType != AudioManager.STREAM_MUSIC)
416           && (streamType != AudioManager.STREAM_RING) && (streamType != AudioManager.STREAM_SYSTEM)
417           && (streamType != AudioManager.STREAM_VOICE_CALL)
418           && (streamType != AudioManager.STREAM_NOTIFICATION)
419           && (streamType != AudioManager.STREAM_BLUETOOTH_SCO)
420           && (streamType != AudioManager.STREAM_DTMF)) {
421            throw new IllegalArgumentException("Invalid stream type.");
422        }
423        mStreamType = streamType;
424
425        //--------------
426        // sample rate, note these values are subject to change
427        if ( (sampleRateInHz < 4000) || (sampleRateInHz > 48000) ) {
428            throw new IllegalArgumentException(sampleRateInHz
429                    + "Hz is not a supported sample rate.");
430        }
431        mSampleRate = sampleRateInHz;
432
433        //--------------
434        // channel config
435        mChannelConfiguration = channelConfig;
436
437        switch (channelConfig) {
438        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
439        case AudioFormat.CHANNEL_OUT_MONO:
440        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
441            mChannelCount = 1;
442            mChannels = AudioFormat.CHANNEL_OUT_MONO;
443            break;
444        case AudioFormat.CHANNEL_OUT_STEREO:
445        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
446            mChannelCount = 2;
447            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
448            break;
449        default:
450            if (!isMultichannelConfigSupported(channelConfig)) {
451                // input channel configuration features unsupported channels
452                throw new IllegalArgumentException("Unsupported channel configuration.");
453            }
454            mChannels = channelConfig;
455            mChannelCount = Integer.bitCount(channelConfig);
456        }
457
458        //--------------
459        // audio format
460        switch (audioFormat) {
461        case AudioFormat.ENCODING_DEFAULT:
462            mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
463            break;
464        case AudioFormat.ENCODING_PCM_16BIT:
465        case AudioFormat.ENCODING_PCM_8BIT:
466        case AudioFormat.ENCODING_PCM_FLOAT:
467            mAudioFormat = audioFormat;
468            break;
469        default:
470            throw new IllegalArgumentException("Unsupported sample encoding."
471                + " Should be ENCODING_PCM_8BIT or ENCODING_PCM_16BIT"
472                + " or ENCODING_PCM_FLOAT"
473                + ".");
474        }
475
476        //--------------
477        // audio load mode
478        if ( (mode != MODE_STREAM) && (mode != MODE_STATIC) ) {
479            throw new IllegalArgumentException("Invalid mode.");
480        }
481        mDataLoadMode = mode;
482    }
483
484    /**
485     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
486     * @param channelConfig the mask to validate
487     * @return false if the AudioTrack can't be used with such a mask
488     */
489    private static boolean isMultichannelConfigSupported(int channelConfig) {
490        // check for unsupported channels
491        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
492            loge("Channel configuration features unsupported channels");
493            return false;
494        }
495        // check for unsupported multichannel combinations:
496        // - FL/FR must be present
497        // - L/R channels must be paired (e.g. no single L channel)
498        final int frontPair =
499                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
500        if ((channelConfig & frontPair) != frontPair) {
501                loge("Front channels must be present in multichannel configurations");
502                return false;
503        }
504        final int backPair =
505                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
506        if ((channelConfig & backPair) != 0) {
507            if ((channelConfig & backPair) != backPair) {
508                loge("Rear channels can't be used independently");
509                return false;
510            }
511        }
512        return true;
513    }
514
515
516    // Convenience method for the constructor's audio buffer size check.
517    // preconditions:
518    //    mChannelCount is valid
519    //    mAudioFormat is valid
520    // postcondition:
521    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
522    private void audioBuffSizeCheck(int audioBufferSize) {
523        // NB: this section is only valid with PCM data.
524        //     To update when supporting compressed formats
525        int frameSizeInBytes = mChannelCount
526                * (AudioFormat.getBytesPerSample(mAudioFormat));
527        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
528            throw new IllegalArgumentException("Invalid audio buffer size.");
529        }
530
531        mNativeBufferSizeInBytes = audioBufferSize;
532        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
533    }
534
535
536    /**
537     * Releases the native AudioTrack resources.
538     */
539    public void release() {
540        // even though native_release() stops the native AudioTrack, we need to stop
541        // AudioTrack subclasses too.
542        try {
543            stop();
544        } catch(IllegalStateException ise) {
545            // don't raise an exception, we're releasing the resources.
546        }
547        native_release();
548        mState = STATE_UNINITIALIZED;
549    }
550
    // Releases native resources if release() was never called explicitly.
    // NOTE(review): does not chain to super.finalize() — presumably intentional
    // since the direct superclass is Object, but confirm if the hierarchy changes.
    @Override
    protected void finalize() {
        native_finalize();
    }
555
    //--------------------------------------------------------------------------
    // Getters
    //--------------------
    /**
     * Returns the minimum gain value, which is the constant 0.0.
     * Gain values less than 0.0 will be clamped to 0.0.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @return the minimum value, which is the constant 0.0.
     */
    static public float getMinVolume() {
        return GAIN_MIN;
    }

    /**
     * Returns the maximum gain value, which is greater than or equal to 1.0.
     * Gain values greater than the maximum will be clamped to the maximum.
     * <p>The word "volume" in the API name is historical; this is actually a gain
     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
     * corresponds to a gain of 0 dB (sample values left unmodified).
     * @return the maximum value, which is greater than or equal to 1.0.
     */
    static public float getMaxVolume() {
        return GAIN_MAX;
    }

    /**
     * Returns the configured audio data sample rate in Hz.
     */
    public int getSampleRate() {
        return mSampleRate;
    }

    /**
     * Returns the current playback rate in Hz, as reported by the native layer.
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }

    /**
     * Returns the configured audio data format. See {@link AudioFormat#ENCODING_PCM_16BIT}
     * and {@link AudioFormat#ENCODING_PCM_8BIT}.
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }

    /**
     * Returns the type of audio stream this AudioTrack is configured for.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
     */
    public int getStreamType() {
        return mStreamType;
    }

    /**
     * Returns the configured channel configuration (the mask passed at construction).
     * See {@link AudioFormat#CHANNEL_OUT_MONO}
     * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }

    /**
     * Returns the configured number of channels.
     */
    public int getChannelCount() {
        return mChannelCount;
    }

    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     * @see #STATE_UNINITIALIZED
     */
    public int getState() {
        return mState;
    }

    /**
     * Returns the playback state of the AudioTrack instance.
     * Reads under mPlayStateLock so the value reflects the latest transition.
     * @see #PLAYSTATE_STOPPED
     * @see #PLAYSTATE_PAUSED
     * @see #PLAYSTATE_PLAYING
     */
    public int getPlayState() {
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }
653
    /**
     *  Returns the "native frame count", derived from the bufferSizeInBytes specified at
     *  creation time and converted to frame units.
     *  If track's creation mode is {@link #MODE_STATIC},
     *  it is equal to the specified bufferSizeInBytes converted to frame units.
     *  If track's creation mode is {@link #MODE_STREAM},
     *  it is typically greater than or equal to the specified bufferSizeInBytes converted to frame
     *  units; it may be rounded up to a larger value if needed by the target device implementation.
     *  @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     *  See {@link AudioManager#getProperty(String)} for key
     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     */
    @Deprecated
    protected int getNativeFrameCount() {
        return native_get_native_frame_count();
    }
670
    /**
     * Returns marker position expressed in frames.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }

    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }

    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by flush(), reload(), and stop().
     */
    public int getPlaybackHeadPosition() {
        return native_get_position();
    }

    /**
     * Returns this track's estimated latency in milliseconds. This includes the latency due
     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
     *
     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
     * a better solution.
     * @hide
     */
    public int getLatency() {
        return native_get_latency();
    }

    /**
     *  Returns the output sample rate in Hz for the specified stream type.
     */
    static public int getNativeOutputSampleRate(int streamType) {
        return native_get_output_sample_rate(streamType);
    }
718
719    /**
720     * Returns the minimum buffer size required for the successful creation of an AudioTrack
721     * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
722     * guarantee a smooth playback under load, and higher values should be chosen according to
723     * the expected frequency at which the buffer will be refilled with additional data to play.
724     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
725     * to a higher value than the initial source sample rate, be sure to configure the buffer size
726     * based on the highest planned sample rate.
727     * @param sampleRateInHz the source sample rate expressed in Hz.
728     * @param channelConfig describes the configuration of the audio channels.
729     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
730     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
731     * @param audioFormat the format in which the audio data is represented.
732     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
733     *   {@link AudioFormat#ENCODING_PCM_8BIT},
734     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
735     * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
736     *   or {@link #ERROR} if unable to query for output properties,
737     *   or the minimum buffer size expressed in bytes.
738     */
739    static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
740        int channelCount = 0;
741        switch(channelConfig) {
742        case AudioFormat.CHANNEL_OUT_MONO:
743        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
744            channelCount = 1;
745            break;
746        case AudioFormat.CHANNEL_OUT_STEREO:
747        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
748            channelCount = 2;
749            break;
750        default:
751            if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
752                // input channel configuration features unsupported channels
753                loge("getMinBufferSize(): Invalid channel configuration.");
754                return ERROR_BAD_VALUE;
755            } else {
756                channelCount = Integer.bitCount(channelConfig);
757            }
758        }
759
760        if ((audioFormat != AudioFormat.ENCODING_PCM_16BIT)
761            && (audioFormat != AudioFormat.ENCODING_PCM_8BIT)
762            && (audioFormat != AudioFormat.ENCODING_PCM_FLOAT)) {
763            loge("getMinBufferSize(): Invalid audio format.");
764            return ERROR_BAD_VALUE;
765        }
766
767        // sample rate, note these values are subject to change
768        if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
769            loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
770            return ERROR_BAD_VALUE;
771        }
772
773        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
774        if (size <= 0) {
775            loge("getMinBufferSize(): error querying hardware");
776            return ERROR;
777        }
778        else {
779            return size;
780        }
781    }
782
783    /**
784     * Returns the audio session ID.
785     *
786     * @return the ID of the audio session this AudioTrack belongs to.
787     */
    public int getAudioSessionId() {
        // Returns the cached session ID; no native call is made here.
        return mSessionId;
    }
791
792   /**
793    * Poll for a timestamp on demand.
794    * <p>
795    * If you need to track timestamps during initial warmup or after a routing or mode change,
796    * you should request a new timestamp once per second until the reported timestamps
797    * show that the audio clock is stable.
798    * Thereafter, query for a new timestamp approximately once every 10 seconds to once per minute.
799    * Calling this method more often is inefficient.
800    * It is also counter-productive to call this method more often than recommended,
801    * because the short-term differences between successive timestamp reports are not meaningful.
802    * If you need a high-resolution mapping between frame position and presentation time,
803    * consider implementing that at application level, based on low-resolution timestamps.
804    * <p>
805    * The audio data at the returned position may either already have been
806    * presented, or may have not yet been presented but is committed to be presented.
807    * It is not possible to request the time corresponding to a particular position,
808    * or to request the (fractional) position corresponding to a particular time.
809    * If you need such features, consider implementing them at application level.
810    *
811    * @param timestamp a reference to a non-null AudioTimestamp instance allocated
812    *        and owned by caller.
813    * @return true if a timestamp is available, or false if no timestamp is available.
814    *         If a timestamp if available,
815    *         the AudioTimestamp instance is filled in with a position in frame units, together
816    *         with the estimated time when that frame was presented or is committed to
817    *         be presented.
818    *         In the case that no timestamp is available, any supplied instance is left unaltered.
819    */
820    // Add this text when the "on new timestamp" API is added:
821    //   Use if you need to get the most recent timestamp outside of the event callback handler.
822    public boolean getTimestamp(AudioTimestamp timestamp)
823    {
824        if (timestamp == null) {
825            throw new IllegalArgumentException();
826        }
827        // It's unfortunate, but we have to either create garbage every time or use synchronized
828        long[] longArray = new long[2];
829        int ret = native_get_timestamp(longArray);
830        if (ret != SUCCESS) {
831            return false;
832        }
833        timestamp.framePosition = longArray[0];
834        timestamp.nanoTime = longArray[1];
835        return true;
836    }
837
838
839    //--------------------------------------------------------------------------
840    // Initialization / configuration
841    //--------------------
842    /**
843     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
844     * for each periodic playback head position update.
845     * Notifications will be received in the same thread as the one in which the AudioTrack
846     * instance was created.
847     * @param listener
848     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Convenience overload: a null handler delivers events in the thread that
        // created this AudioTrack (see the two-argument overload).
        setPlaybackPositionUpdateListener(listener, null);
    }
852
853    /**
854     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
855     * for each periodic playback head position update.
856     * Use this method to receive AudioTrack events in the Handler associated with another
857     * thread than the one in which you created the AudioTrack instance.
858     * @param listener
859     * @param handler the Handler that will receive the event notification messages.
860     */
861    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
862                                                    Handler handler) {
863        if (listener != null) {
864            mEventHandlerDelegate = new NativeEventHandlerDelegate(this, listener, handler);
865        } else {
866            mEventHandlerDelegate = null;
867        }
868    }
869
870
871    private static float clampGainOrLevel(float gainOrLevel) {
872        if (Float.isNaN(gainOrLevel)) {
873            throw new IllegalArgumentException();
874        }
875        if (gainOrLevel < GAIN_MIN) {
876            gainOrLevel = GAIN_MIN;
877        } else if (gainOrLevel > GAIN_MAX) {
878            gainOrLevel = GAIN_MAX;
879        }
880        return gainOrLevel;
881    }
882
883
884     /**
885     * Sets the specified left and right output gain values on the AudioTrack.
886     * <p>Gain values are clamped to the closed interval [0.0, max] where
887     * max is the value of {@link #getMaxVolume}.
888     * A value of 0.0 results in zero gain (silence), and
889     * a value of 1.0 means unity gain (signal unchanged).
890     * The default value is 1.0 meaning unity gain.
891     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
892     * @param leftGain output gain for the left channel.
893     * @param rightGain output gain for the right channel
894     * @return error code or success, see {@link #SUCCESS},
895     *    {@link #ERROR_INVALID_OPERATION}
896     * @deprecated Applications should use {@link #setVolume} instead, as it
897     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
898     */
899    public int setStereoVolume(float leftGain, float rightGain) {
900        if (isRestricted()) {
901            return SUCCESS;
902        }
903        if (mState == STATE_UNINITIALIZED) {
904            return ERROR_INVALID_OPERATION;
905        }
906
907        leftGain = clampGainOrLevel(leftGain);
908        rightGain = clampGainOrLevel(rightGain);
909
910        native_setVolume(leftGain, rightGain);
911
912        return SUCCESS;
913    }
914
915
916    /**
917     * Sets the specified output gain value on all channels of this track.
918     * <p>Gain values are clamped to the closed interval [0.0, max] where
919     * max is the value of {@link #getMaxVolume}.
920     * A value of 0.0 results in zero gain (silence), and
921     * a value of 1.0 means unity gain (signal unchanged).
922     * The default value is 1.0 meaning unity gain.
923     * <p>This API is preferred over {@link #setStereoVolume}, as it
924     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
925     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
926     * @param gain output gain for all channels.
927     * @return error code or success, see {@link #SUCCESS},
928     *    {@link #ERROR_INVALID_OPERATION}
929     */
    public int setVolume(float gain) {
        // Applies the same gain to both channels via setStereoVolume().
        return setStereoVolume(gain, gain);
    }
933
934
935    /**
936     * Sets the playback sample rate for this track. This sets the sampling rate at which
937     * the audio data will be consumed and played back
938     * (as set by the sampleRateInHz parameter in the
939     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
940     * not the original sampling rate of the
941     * content. For example, setting it to half the sample rate of the content will cause the
942     * playback to last twice as long, but will also result in a pitch shift down by one octave.
943     * The valid sample rate range is from 1 Hz to twice the value returned by
944     * {@link #getNativeOutputSampleRate(int)}.
945     * @param sampleRateInHz the sample rate expressed in Hz
946     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
947     *    {@link #ERROR_INVALID_OPERATION}
948     */
949    public int setPlaybackRate(int sampleRateInHz) {
950        if (mState != STATE_INITIALIZED) {
951            return ERROR_INVALID_OPERATION;
952        }
953        if (sampleRateInHz <= 0) {
954            return ERROR_BAD_VALUE;
955        }
956        return native_set_playback_rate(sampleRateInHz);
957    }
958
959
960    /**
961     * Sets the position of the notification marker.  At most one marker can be active.
962     * @param markerInFrames marker position in wrapping frame units similar to
963     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
964     * To set a marker at a position which would appear as zero due to wraparound,
965     * a workaround is to use a non-zero position near zero, such as -1 or 1.
966     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
967     *  {@link #ERROR_INVALID_OPERATION}
968     */
969    public int setNotificationMarkerPosition(int markerInFrames) {
970        if (mState == STATE_UNINITIALIZED) {
971            return ERROR_INVALID_OPERATION;
972        }
973        return native_set_marker_pos(markerInFrames);
974    }
975
976
977    /**
978     * Sets the period for the periodic notification event.
979     * @param periodInFrames update period expressed in frames
980     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
981     */
982    public int setPositionNotificationPeriod(int periodInFrames) {
983        if (mState == STATE_UNINITIALIZED) {
984            return ERROR_INVALID_OPERATION;
985        }
986        return native_set_pos_update_period(periodInFrames);
987    }
988
989
990    /**
991     * Sets the playback head position.
992     * The track must be stopped or paused for the position to be changed,
993     * and must use the {@link #MODE_STATIC} mode.
994     * @param positionInFrames playback head position expressed in frames
995     * Zero corresponds to start of buffer.
996     * The position must not be greater than the buffer size in frames, or negative.
997     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
998     *    {@link #ERROR_INVALID_OPERATION}
999     */
1000    public int setPlaybackHeadPosition(int positionInFrames) {
1001        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1002                getPlayState() == PLAYSTATE_PLAYING) {
1003            return ERROR_INVALID_OPERATION;
1004        }
1005        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
1006            return ERROR_BAD_VALUE;
1007        }
1008        return native_set_position(positionInFrames);
1009    }
1010
1011    /**
1012     * Sets the loop points and the loop count. The loop can be infinite.
1013     * Similarly to setPlaybackHeadPosition,
1014     * the track must be stopped or paused for the loop points to be changed,
1015     * and must use the {@link #MODE_STATIC} mode.
1016     * @param startInFrames loop start marker expressed in frames
1017     * Zero corresponds to start of buffer.
1018     * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1019     * @param endInFrames loop end marker expressed in frames
1020     * The total buffer size in frames corresponds to end of buffer.
1021     * The end marker must not be greater than the buffer size in frames.
1022     * For looping, the end marker must not be less than or equal to the start marker,
1023     * but to disable looping
1024     * it is permitted for start marker, end marker, and loop count to all be 0.
1025     * @param loopCount the number of times the loop is looped.
1026     *    A value of -1 means infinite looping, and 0 disables looping.
1027     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1028     *    {@link #ERROR_INVALID_OPERATION}
1029     */
1030    public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1031        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1032                getPlayState() == PLAYSTATE_PLAYING) {
1033            return ERROR_INVALID_OPERATION;
1034        }
1035        if (loopCount == 0) {
1036            ;   // explicitly allowed as an exception to the loop region range check
1037        } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1038                startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1039            return ERROR_BAD_VALUE;
1040        }
1041        return native_set_loop(startInFrames, endInFrames, loopCount);
1042    }
1043
1044    /**
1045     * Sets the initialization state of the instance. This method was originally intended to be used
1046     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
1047     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
1048     * @param state the state of the AudioTrack instance
1049     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
1050     */
    @Deprecated
    protected void setState(int state) {
        // Directly overwrites the track state; no validation and no native-layer update.
        mState = state;
    }
1055
1056
1057    //---------------------------------------------------------
1058    // Transport control methods
1059    //--------------------
1060    /**
1061     * Starts playing an AudioTrack.
1062     * If track's creation mode is {@link #MODE_STATIC}, you must have called write() prior.
1063     *
1064     * @throws IllegalStateException
1065     */
1066    public void play()
1067    throws IllegalStateException {
1068        if (mState != STATE_INITIALIZED) {
1069            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
1070        }
1071        if (isRestricted()) {
1072            setVolume(0);
1073        }
1074        synchronized(mPlayStateLock) {
1075            native_start();
1076            mPlayState = PLAYSTATE_PLAYING;
1077        }
1078    }
1079
1080    private boolean isRestricted() {
1081        try {
1082            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, mStreamType,
1083                    Process.myUid(), ActivityThread.currentPackageName());
1084            return mode != AppOpsManager.MODE_ALLOWED;
1085        } catch (RemoteException e) {
1086            return false;
1087        }
1088    }
1089
1090    /**
1091     * Stops playing the audio data.
1092     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
1093     * after the last buffer that was written has been played. For an immediate stop, use
1094     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
1095     * back yet.
1096     * @throws IllegalStateException
1097     */
1098    public void stop()
1099    throws IllegalStateException {
1100        if (mState != STATE_INITIALIZED) {
1101            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
1102        }
1103
1104        // stop playing
1105        synchronized(mPlayStateLock) {
1106            native_stop();
1107            mPlayState = PLAYSTATE_STOPPED;
1108        }
1109    }
1110
1111    /**
1112     * Pauses the playback of the audio data. Data that has not been played
1113     * back will not be discarded. Subsequent calls to {@link #play} will play
1114     * this data back. See {@link #flush()} to discard this data.
1115     *
1116     * @throws IllegalStateException
1117     */
1118    public void pause()
1119    throws IllegalStateException {
1120        if (mState != STATE_INITIALIZED) {
1121            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
1122        }
1123        //logd("pause()");
1124
1125        // pause playback
1126        synchronized(mPlayStateLock) {
1127            native_pause();
1128            mPlayState = PLAYSTATE_PAUSED;
1129        }
1130    }
1131
1132
1133    //---------------------------------------------------------
1134    // Audio data supply
1135    //--------------------
1136
1137    /**
1138     * Flushes the audio data currently queued for playback. Any data that has
1139     * not been played back will be discarded.  No-op if not stopped or paused,
1140     * or if the track's creation mode is not {@link #MODE_STREAM}.
1141     */
1142    public void flush() {
1143        if (mState == STATE_INITIALIZED) {
1144            // flush the data in native layer
1145            native_flush();
1146        }
1147
1148    }
1149
1150    /**
1151     * Writes the audio data to the audio sink for playback (streaming mode),
1152     * or copies audio data for later playback (static buffer mode).
1153     * In streaming mode, will block until all data has been written to the audio sink.
1154     * In static buffer mode, copies the data to the buffer starting at offset 0.
1155     * Note that the actual playback of this data might occur after this function
1156     * returns. This function is thread safe with respect to {@link #stop} calls,
1157     * in which case all of the specified data might not be written to the audio sink.
1158     *
1159     * @param audioData the array that holds the data to play.
1160     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
1161     *    starts.
1162     * @param sizeInBytes the number of bytes to read in audioData after the offset.
1163     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
1164     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1165     *    the parameters don't resolve to valid data and indexes.
1166     */
1167
1168    public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
1169
1170        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1171            return ERROR_INVALID_OPERATION;
1172        }
1173
1174        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
1175                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
1176                || (offsetInBytes + sizeInBytes > audioData.length)) {
1177            return ERROR_BAD_VALUE;
1178        }
1179
1180        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
1181                true /*isBlocking*/);
1182
1183        if ((mDataLoadMode == MODE_STATIC)
1184                && (mState == STATE_NO_STATIC_DATA)
1185                && (ret > 0)) {
1186            // benign race with respect to other APIs that read mState
1187            mState = STATE_INITIALIZED;
1188        }
1189
1190        return ret;
1191    }
1192
1193
1194    /**
1195     * Writes the audio data to the audio sink for playback (streaming mode),
1196     * or copies audio data for later playback (static buffer mode).
1197     * In streaming mode, will block until all data has been written to the audio sink.
1198     * In static buffer mode, copies the data to the buffer starting at offset 0.
1199     * Note that the actual playback of this data might occur after this function
1200     * returns. This function is thread safe with respect to {@link #stop} calls,
1201     * in which case all of the specified data might not be written to the audio sink.
1202     *
1203     * @param audioData the array that holds the data to play.
1204     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1205     *     starts.
1206     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1207     * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
1208     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1209     *    the parameters don't resolve to valid data and indexes.
1210     */
1211
1212    public int write(short[] audioData, int offsetInShorts, int sizeInShorts) {
1213
1214        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1215            return ERROR_INVALID_OPERATION;
1216        }
1217
1218        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
1219                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
1220                || (offsetInShorts + sizeInShorts > audioData.length)) {
1221            return ERROR_BAD_VALUE;
1222        }
1223
1224        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat);
1225
1226        if ((mDataLoadMode == MODE_STATIC)
1227                && (mState == STATE_NO_STATIC_DATA)
1228                && (ret > 0)) {
1229            // benign race with respect to other APIs that read mState
1230            mState = STATE_INITIALIZED;
1231        }
1232
1233        return ret;
1234    }
1235
1236
1237    /**
1238     * Writes the audio data to the audio sink for playback (streaming mode),
1239     * or copies audio data for later playback (static buffer mode).
1240     * In static buffer mode, copies the data to the buffer starting at offset 0,
1241     * and the write mode is ignored.
1242     * In streaming mode, the blocking behavior will depend on the write mode.
1243     * <p>
1244     * Note that the actual playback of this data might occur after this function
1245     * returns. This function is thread safe with respect to {@link #stop} calls,
1246     * in which case all of the specified data might not be written to the audio sink.
1247     * <p>
1248     * @param audioData the array that holds the data to play.
1249     *     The implementation does not clip for sample values within the nominal range
1250     *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
1251     *     less than or equal to unity (1.0f), and in the absence of post-processing effects
1252     *     that could add energy, such as reverb.  For the convenience of applications
1253     *     that compute samples using filters with non-unity gain,
1254     *     sample values +3 dB beyond the nominal range are permitted.
1255     *     However such values may eventually be limited or clipped, depending on various gains
1256     *     and later processing in the audio path.  Therefore applications are encouraged
1257     *     to provide samples values within the nominal range.
1258     * @param offsetInFloats the offset, expressed as a number of floats,
1259     *     in audioData where the data to play starts.
1260     * @param sizeInFloats the number of floats to read in audioData after the offset.
1261     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1262     *     effect in static mode.
1263     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1264     *         to the audio sink.
1265     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1266     *     queuing as much audio data for playback as possible without blocking.
1267     * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION}
1268     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1269     *    the parameters don't resolve to valid data and indexes.
1270     */
1271    public int write(float[] audioData, int offsetInFloats, int sizeInFloats,
1272            @WriteMode int writeMode) {
1273
1274        if (mState == STATE_UNINITIALIZED) {
1275            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1276            return ERROR_INVALID_OPERATION;
1277        }
1278
1279        if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
1280            Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
1281            return ERROR_INVALID_OPERATION;
1282        }
1283
1284        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1285            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1286            return ERROR_BAD_VALUE;
1287        }
1288
1289        if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
1290                || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
1291                || (offsetInFloats + sizeInFloats > audioData.length)) {
1292            Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
1293            return ERROR_BAD_VALUE;
1294        }
1295
1296        int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
1297                writeMode == WRITE_BLOCKING);
1298
1299        if ((mDataLoadMode == MODE_STATIC)
1300                && (mState == STATE_NO_STATIC_DATA)
1301                && (ret > 0)) {
1302            // benign race with respect to other APIs that read mState
1303            mState = STATE_INITIALIZED;
1304        }
1305
1306        return ret;
1307    }
1308
1309
1310    /**
1311     * Writes the audio data to the audio sink for playback (streaming mode),
1312     * or copies audio data for later playback (static buffer mode).
1313     * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write
1314     * mode is ignored.
1315     * In streaming mode, the blocking behavior will depend on the write mode.
1316     * @param audioData the buffer that holds the data to play, starting at the position reported
1317     *     by <code>audioData.position()</code>.
1318     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
1319     *     have been advanced to reflect the amount of data that was successfully written to
1320     *     the AudioTrack.
1321     * @param sizeInBytes number of bytes to write.
1322     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
1323     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1324     *     effect in static mode.
1325     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1326     *         to the audio sink.
1327     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1328     *     queuing as much audio data for playback as possible without blocking.
1329     * @return 0 or a positive number of bytes that were written, or
1330     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1331     */
1332    public int write(ByteBuffer audioData, int sizeInBytes,
1333            @WriteMode int writeMode) {
1334
1335        if (mState == STATE_UNINITIALIZED) {
1336            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1337            return ERROR_INVALID_OPERATION;
1338        }
1339
1340        if (mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1341            Log.e(TAG, "AudioTrack.write(ByteBuffer ...) not yet supported for ENCODING_PCM_FLOAT");
1342            return ERROR_INVALID_OPERATION;
1343        }
1344
1345        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1346            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1347            return ERROR_BAD_VALUE;
1348        }
1349
1350        if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
1351            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
1352            return ERROR_BAD_VALUE;
1353        }
1354
1355        int ret = 0;
1356        if (audioData.isDirect()) {
1357            ret = native_write_native_bytes(audioData,
1358                    audioData.position(), sizeInBytes, mAudioFormat,
1359                    writeMode == WRITE_BLOCKING);
1360        } else {
1361            ret = native_write_byte(NioUtils.unsafeArray(audioData),
1362                    NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
1363                    sizeInBytes, mAudioFormat,
1364                    writeMode == WRITE_BLOCKING);
1365        }
1366
1367        if ((mDataLoadMode == MODE_STATIC)
1368                && (mState == STATE_NO_STATIC_DATA)
1369                && (ret > 0)) {
1370            // benign race with respect to other APIs that read mState
1371            mState = STATE_INITIALIZED;
1372        }
1373
1374        if (ret > 0) {
1375            audioData.position(audioData.position() + ret);
1376        }
1377
1378        return ret;
1379    }
1380
1381    /**
1382     * Notifies the native resource to reuse the audio data already loaded in the native
1383     * layer, that is to rewind to start of buffer.
1384     * The track's creation mode must be {@link #MODE_STATIC}.
1385     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1386     *  {@link #ERROR_INVALID_OPERATION}
1387     */
1388    public int reloadStaticData() {
1389        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
1390            return ERROR_INVALID_OPERATION;
1391        }
1392        return native_reload_static();
1393    }
1394
1395    //--------------------------------------------------------------------------
1396    // Audio effects management
1397    //--------------------
1398
1399    /**
1400     * Attaches an auxiliary effect to the audio track. A typical auxiliary
1401     * effect is a reverberation effect which can be applied on any sound source
1402     * that directs a certain amount of its energy to this effect. This amount
1403     * is defined by setAuxEffectSendLevel().
1404     * {@see #setAuxEffectSendLevel(float)}.
1405     * <p>After creating an auxiliary effect (e.g.
1406     * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
1407     * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
1408     * this method to attach the audio track to the effect.
1409     * <p>To detach the effect from the audio track, call this method with a
1410     * null effect id.
1411     *
1412     * @param effectId system wide unique id of the effect to attach
1413     * @return error code or success, see {@link #SUCCESS},
1414     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
1415     */
1416    public int attachAuxEffect(int effectId) {
1417        if (mState == STATE_UNINITIALIZED) {
1418            return ERROR_INVALID_OPERATION;
1419        }
1420        return native_attachAuxEffect(effectId);
1421    }
1422
1423    /**
1424     * Sets the send level of the audio track to the attached auxiliary effect
1425     * {@link #attachAuxEffect(int)}.  Effect levels
1426     * are clamped to the closed interval [0.0, max] where
1427     * max is the value of {@link #getMaxVolume}.
1428     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
1429     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
1430     * this method must be called for the effect to be applied.
1431     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
1432     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
1433     * so an appropriate conversion from linear UI input x to level is:
1434     * x == 0 -&gt; level = 0
1435     * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
1436     *
1437     * @param level linear send level
1438     * @return error code or success, see {@link #SUCCESS},
1439     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
1440     */
1441    public int setAuxEffectSendLevel(float level) {
1442        if (isRestricted()) {
1443            return SUCCESS;
1444        }
1445        if (mState == STATE_UNINITIALIZED) {
1446            return ERROR_INVALID_OPERATION;
1447        }
1448        level = clampGainOrLevel(level);
1449        int err = native_setAuxEffectSendLevel(level);
1450        return err == 0 ? SUCCESS : ERROR;
1451    }
1452
1453    //---------------------------------------------------------
1454    // Interface definitions
1455    //--------------------
1456    /**
1457     * Interface definition for a callback to be invoked when the playback head position of
1458     * an AudioTrack has reached a notification marker or has increased by a certain period.
1459     */
1460    public interface OnPlaybackPositionUpdateListener  {
1461        /**
1462         * Called on the listener to notify it that the previously set marker has been reached
1463         * by the playback head.
1464         */
1465        void onMarkerReached(AudioTrack track);
1466
1467        /**
1468         * Called on the listener to periodically notify it that the playback head has reached
1469         * a multiple of the notification period.
1470         */
1471        void onPeriodicNotification(AudioTrack track);
1472    }
1473
1474
1475    //---------------------------------------------------------
1476    // Inner classes
1477    //--------------------
1478    /**
1479     * Helper class to handle the forwarding of native events to the appropriate listener
1480     * (potentially) handled in a different thread
1481     */
1482    private class NativeEventHandlerDelegate {
1483        private final Handler mHandler;
1484
1485        NativeEventHandlerDelegate(final AudioTrack track,
1486                                   final OnPlaybackPositionUpdateListener listener,
1487                                   Handler handler) {
1488            // find the looper for our new event handler
1489            Looper looper;
1490            if (handler != null) {
1491                looper = handler.getLooper();
1492            } else {
1493                // no given handler, use the looper the AudioTrack was created in
1494                looper = mInitializationLooper;
1495            }
1496
1497            // construct the event handler with this looper
1498            if (looper != null) {
1499                // implement the event handler delegate
1500                mHandler = new Handler(looper) {
1501                    @Override
1502                    public void handleMessage(Message msg) {
1503                        if (track == null) {
1504                            return;
1505                        }
1506                        switch(msg.what) {
1507                        case NATIVE_EVENT_MARKER:
1508                            if (listener != null) {
1509                                listener.onMarkerReached(track);
1510                            }
1511                            break;
1512                        case NATIVE_EVENT_NEW_POS:
1513                            if (listener != null) {
1514                                listener.onPeriodicNotification(track);
1515                            }
1516                            break;
1517                        default:
1518                            loge("Unknown native event type: " + msg.what);
1519                            break;
1520                        }
1521                    }
1522                };
1523            } else {
1524                mHandler = null;
1525            }
1526        }
1527
1528        Handler getHandler() {
1529            return mHandler;
1530        }
1531    }
1532
1533
1534    //---------------------------------------------------------
1535    // Java methods called from the native side
1536    //--------------------
1537    @SuppressWarnings("unused")
1538    private static void postEventFromNative(Object audiotrack_ref,
1539            int what, int arg1, int arg2, Object obj) {
1540        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
1541        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
1542        if (track == null) {
1543            return;
1544        }
1545
1546        NativeEventHandlerDelegate delegate = track.mEventHandlerDelegate;
1547        if (delegate != null) {
1548            Handler handler = delegate.getHandler();
1549            if (handler != null) {
1550                Message m = handler.obtainMessage(what, arg1, arg2, obj);
1551                handler.sendMessage(m);
1552            }
1553        }
1554
1555    }
1556
1557
1558    //---------------------------------------------------------
1559    // Native methods called from the Java side
1560    //--------------------
1561
    // Creates the native AudioTrack peer for this Java object; sessionId must be a
    // writable int array used to communicate the session id with the native layer.
    private native final int native_setup(Object audiotrack_this,
            int streamType, int sampleRate, int channelMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    private native final void native_finalize();

    private native final void native_release();

    // Transport control of the native track.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Writes audio data to the native track. The byte/float variants take a blocking
    // flag; the short variant does not expose one here.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    // Variant for direct ByteBuffers: positionInBytes indexes into the buffer's storage.
    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    // Rewinds a MODE_STATIC track to the start of its already-loaded buffer.
    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    // Marker / periodic position-notification configuration (frames).
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not need a track instance.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary-effect routing; see attachAuxEffect() / setAuxEffectSendLevel().
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);
1625
1626    //---------------------------------------------------------
1627    // Utility methods
1628    //------------------
1629
    // Logs a debug-level message under this class's log tag.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
1633
    // Logs an error-level message under this class's log tag.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
1637
1638}
1639