AudioTrack.java revision c52f4e25e21719d5d637a588e3200be941b9fe4d
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.media;
18
19import java.lang.annotation.Retention;
20import java.lang.annotation.RetentionPolicy;
21import java.lang.ref.WeakReference;
22import java.nio.ByteBuffer;
23import java.nio.NioUtils;
24
25import android.annotation.IntDef;
26import android.app.ActivityThread;
27import android.app.AppOpsManager;
28import android.content.Context;
29import android.os.Handler;
30import android.os.IBinder;
31import android.os.Looper;
32import android.os.Message;
33import android.os.Process;
34import android.os.RemoteException;
35import android.os.ServiceManager;
36import android.util.Log;
37
38import com.android.internal.app.IAppOpsService;
39
40
41/**
42 * The AudioTrack class manages and plays a single audio resource for Java applications.
43 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
44 * achieved by "pushing" the data to the AudioTrack object using one of the
45 *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
46 *  and {@link #write(float[], int, int, int)} methods.
47 *
48 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
49 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
50 * one of the {@code write()} methods. These are blocking and return when the data has been
51 * transferred from the Java layer to the native layer and queued for playback. The streaming
52 * mode is most useful when playing blocks of audio data that for instance are:
53 *
54 * <ul>
55 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
56 *   <li>too big to fit in memory because of the characteristics of the audio data
57 *         (high sampling rate, bits per sample ...)</li>
58 *   <li>received or generated while previously queued audio is playing.</li>
59 * </ul>
60 *
61 * The static mode should be chosen when dealing with short sounds that fit in memory and
62 * that need to be played with the smallest latency possible. The static mode will
63 * therefore be preferred for UI and game sounds that are played often, and with the
64 * smallest overhead possible.
65 *
66 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
67 * The size of this buffer, specified during the construction, determines how long an AudioTrack
68 * can play before running out of data.<br>
69 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
70 * be played from it.<br>
71 * For the streaming mode, data will be written to the audio sink in chunks of
72 * sizes less than or equal to the total buffer size.
73 *
74 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
75 */
76public class AudioTrack
77{
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Minimum value for sample rate */
    private static final int SAMPLE_RATE_HZ_MIN = 4000;
    /** Maximum value for sample rate */
    private static final int SAMPLE_RATE_HZ_MAX = 48000;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used in {@link #write(ByteBuffer, int, int)}
     */
    public final static int WRITE_BLOCKING = 0;
    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking, to be used in
     * {@link #write(ByteBuffer, int, int)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to make sure mPlayState updates are reflecting the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativeEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     */
    private int mSampleRate; // initialized by all constructors
    /**
     * The number of audio output channels (1 is mono, 2 is stereo).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask.
     */
    private int mChannels = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;
    /**
     * The way audio is consumed by the audio sink, streaming or static.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current audio channel configuration.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
    /**
     * Audio session ID
     */
    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
    /**
     * Reference to the app-ops service.
     */
    private final IAppOpsService mAppOps;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    private long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;

275
276    //--------------------------------------------------------------------------
277    // Constructor, Finalize
278    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback.
     *   If track's creation mode is {@link #MODE_STREAM}, you can write data into
     *   this buffer in chunks less than or equal to this size, and it is typical to use
     *   chunks of 1/2 of the total size to permit double-buffering.
     *   If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegate to the session-aware constructor; AUDIO_SESSION_ALLOCATE requests
        // that a new audio session be created for this track.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
    }
312
    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session
     * with all other media players or audio tracks in the same session, otherwise a new session
     * will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        // remember which looper is associated with the AudioTrack instantiation;
        // fall back to the main looper when called from a thread without one
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }
        mInitializationLooper = looper;

        // validates arguments and initializes mStreamType, mSampleRate, mChannels,
        // mAudioFormat and mDataLoadMode; throws IllegalArgumentException on bad input
        audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);

        // validates the buffer size against the frame size; initializes
        // mNativeBufferSizeInBytes and mNativeBufferSizeInFrames
        audioBuffSizeCheck(bufferSizeInBytes);

        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // session[0] is an in/out parameter: the requested session ID going in,
        // the session actually assigned by the native layer coming out
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this),
                mStreamType, mSampleRate, mChannels, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSessionId = session[0];

        // a static track is not fully usable until its data has been written
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }
390
    // mask of all the output channels supported by this implementation;
    // multichannel configurations passed to the constructor must be a subset of this mask
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER;
400
401    // Convenience method for the constructor's parameter checks.
402    // This is where constructor IllegalArgumentException-s are thrown
403    // postconditions:
404    //    mStreamType is valid
405    //    mChannelCount is valid
406    //    mChannels is valid
407    //    mAudioFormat is valid
408    //    mSampleRate is valid
409    //    mDataLoadMode is valid
410    private void audioParamCheck(int streamType, int sampleRateInHz,
411                                 int channelConfig, int audioFormat, int mode) {
412
413        //--------------
414        // stream type
415        if( (streamType != AudioManager.STREAM_ALARM) && (streamType != AudioManager.STREAM_MUSIC)
416           && (streamType != AudioManager.STREAM_RING) && (streamType != AudioManager.STREAM_SYSTEM)
417           && (streamType != AudioManager.STREAM_VOICE_CALL)
418           && (streamType != AudioManager.STREAM_NOTIFICATION)
419           && (streamType != AudioManager.STREAM_BLUETOOTH_SCO)
420           && (streamType != AudioManager.STREAM_DTMF)) {
421            throw new IllegalArgumentException("Invalid stream type.");
422        }
423        mStreamType = streamType;
424
425        //--------------
426        // sample rate, note these values are subject to change
427        if ( (sampleRateInHz < 4000) || (sampleRateInHz > 48000) ) {
428            throw new IllegalArgumentException(sampleRateInHz
429                    + "Hz is not a supported sample rate.");
430        }
431        mSampleRate = sampleRateInHz;
432
433        //--------------
434        // channel config
435        mChannelConfiguration = channelConfig;
436
437        switch (channelConfig) {
438        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
439        case AudioFormat.CHANNEL_OUT_MONO:
440        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
441            mChannelCount = 1;
442            mChannels = AudioFormat.CHANNEL_OUT_MONO;
443            break;
444        case AudioFormat.CHANNEL_OUT_STEREO:
445        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
446            mChannelCount = 2;
447            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
448            break;
449        default:
450            if (!isMultichannelConfigSupported(channelConfig)) {
451                // input channel configuration features unsupported channels
452                throw new IllegalArgumentException("Unsupported channel configuration.");
453            }
454            mChannels = channelConfig;
455            mChannelCount = Integer.bitCount(channelConfig);
456        }
457
458        //--------------
459        // audio format
460        switch (audioFormat) {
461        case AudioFormat.ENCODING_DEFAULT:
462            mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
463            break;
464        case AudioFormat.ENCODING_PCM_16BIT:
465        case AudioFormat.ENCODING_PCM_8BIT:
466        case AudioFormat.ENCODING_PCM_FLOAT:
467            mAudioFormat = audioFormat;
468            break;
469        default:
470            throw new IllegalArgumentException("Unsupported sample encoding."
471                + " Should be ENCODING_PCM_8BIT or ENCODING_PCM_16BIT"
472                + " or ENCODING_PCM_FLOAT"
473                + ".");
474        }
475
476        //--------------
477        // audio load mode
478        if ( (mode != MODE_STREAM) && (mode != MODE_STATIC) ) {
479            throw new IllegalArgumentException("Invalid mode.");
480        }
481        mDataLoadMode = mode;
482    }
483
484    /**
485     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
486     * @param channelConfig the mask to validate
487     * @return false if the AudioTrack can't be used with such a mask
488     */
489    private static boolean isMultichannelConfigSupported(int channelConfig) {
490        // check for unsupported channels
491        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
492            loge("Channel configuration features unsupported channels");
493            return false;
494        }
495        // check for unsupported multichannel combinations:
496        // - FL/FR must be present
497        // - L/R channels must be paired (e.g. no single L channel)
498        final int frontPair =
499                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
500        if ((channelConfig & frontPair) != frontPair) {
501                loge("Front channels must be present in multichannel configurations");
502                return false;
503        }
504        final int backPair =
505                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
506        if ((channelConfig & backPair) != 0) {
507            if ((channelConfig & backPair) != backPair) {
508                loge("Rear channels can't be used independently");
509                return false;
510            }
511        }
512        return true;
513    }
514
515
516    // Convenience method for the constructor's audio buffer size check.
517    // preconditions:
518    //    mChannelCount is valid
519    //    mAudioFormat is valid
520    // postcondition:
521    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
522    private void audioBuffSizeCheck(int audioBufferSize) {
523        // NB: this section is only valid with PCM data.
524        //     To update when supporting compressed formats
525        int frameSizeInBytes = mChannelCount
526                * (AudioFormat.getBytesPerSample(mAudioFormat));
527        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
528            throw new IllegalArgumentException("Invalid audio buffer size.");
529        }
530
531        mNativeBufferSizeInBytes = audioBufferSize;
532        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
533    }
534
535
536    /**
537     * Releases the native AudioTrack resources.
538     */
539    public void release() {
540        // even though native_release() stops the native AudioTrack, we need to stop
541        // AudioTrack subclasses too.
542        try {
543            stop();
544        } catch(IllegalStateException ise) {
545            // don't raise an exception, we're releasing the resources.
546        }
547        native_release();
548        mState = STATE_UNINITIALIZED;
549    }
550
551    @Override
552    protected void finalize() {
553        native_finalize();
554    }
555
556    //--------------------------------------------------------------------------
557    // Getters
558    //--------------------
559    /**
560     * Returns the minimum gain value, which is the constant 0.0.
561     * Gain values less than 0.0 will be clamped to 0.0.
562     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
563     * @return the minimum value, which is the constant 0.0.
564     */
565    static public float getMinVolume() {
566        return GAIN_MIN;
567    }
568
569    /**
570     * Returns the maximum gain value, which is greater than or equal to 1.0.
571     * Gain values greater than the maximum will be clamped to the maximum.
572     * <p>The word "volume" in the API name is historical; this is actually a gain.
573     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
574     * corresponds to a gain of 0 dB (sample values left unmodified).
575     * @return the maximum value, which is greater than or equal to 1.0.
576     */
577    static public float getMaxVolume() {
578        return GAIN_MAX;
579    }
580
581    /**
582     * Returns the configured audio data sample rate in Hz
583     */
584    public int getSampleRate() {
585        return mSampleRate;
586    }
587
588    /**
589     * Returns the current playback rate in Hz.
590     */
591    public int getPlaybackRate() {
592        return native_get_playback_rate();
593    }
594
595    /**
596     * Returns the configured audio data format. See {@link AudioFormat#ENCODING_PCM_16BIT}
597     * and {@link AudioFormat#ENCODING_PCM_8BIT}.
598     */
599    public int getAudioFormat() {
600        return mAudioFormat;
601    }
602
603    /**
604     * Returns the type of audio stream this AudioTrack is configured for.
605     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
606     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
607     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
608     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
609     */
610    public int getStreamType() {
611        return mStreamType;
612    }
613
614    /**
615     * Returns the configured channel configuration.
616     * See {@link AudioFormat#CHANNEL_OUT_MONO}
617     * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
618     */
619    public int getChannelConfiguration() {
620        return mChannelConfiguration;
621    }
622
623    /**
624     * Returns the configured number of channels.
625     */
626    public int getChannelCount() {
627        return mChannelCount;
628    }
629
630    /**
631     * Returns the state of the AudioTrack instance. This is useful after the
632     * AudioTrack instance has been created to check if it was initialized
633     * properly. This ensures that the appropriate resources have been acquired.
634     * @see #STATE_INITIALIZED
635     * @see #STATE_NO_STATIC_DATA
636     * @see #STATE_UNINITIALIZED
637     */
638    public int getState() {
639        return mState;
640    }
641
642    /**
643     * Returns the playback state of the AudioTrack instance.
644     * @see #PLAYSTATE_STOPPED
645     * @see #PLAYSTATE_PAUSED
646     * @see #PLAYSTATE_PLAYING
647     */
648    public int getPlayState() {
649        synchronized (mPlayStateLock) {
650            return mPlayState;
651        }
652    }
653
654    /**
655     *  Returns the "native frame count", derived from the bufferSizeInBytes specified at
656     *  creation time and converted to frame units.
657     *  If track's creation mode is {@link #MODE_STATIC},
658     *  it is equal to the specified bufferSizeInBytes converted to frame units.
659     *  If track's creation mode is {@link #MODE_STREAM},
660     *  it is typically greater than or equal to the specified bufferSizeInBytes converted to frame
661     *  units; it may be rounded up to a larger value if needed by the target device implementation.
662     *  @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
663     *  See {@link AudioManager#getProperty(String)} for key
664     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
665     */
666    @Deprecated
667    protected int getNativeFrameCount() {
668        return native_get_native_frame_count();
669    }
670
671    /**
672     * Returns marker position expressed in frames.
673     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
674     * or zero if marker is disabled.
675     */
676    public int getNotificationMarkerPosition() {
677        return native_get_marker_pos();
678    }
679
680    /**
681     * Returns the notification update period expressed in frames.
682     * Zero means that no position update notifications are being delivered.
683     */
684    public int getPositionNotificationPeriod() {
685        return native_get_pos_update_period();
686    }
687
688    /**
689     * Returns the playback head position expressed in frames.
690     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
691     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
692     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
693     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
694     * It is reset to zero by flush(), reload(), and stop().
695     */
696    public int getPlaybackHeadPosition() {
697        return native_get_position();
698    }
699
700    /**
701     * Returns this track's estimated latency in milliseconds. This includes the latency due
702     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
703     *
704     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
705     * a better solution.
706     * @hide
707     */
708    public int getLatency() {
709        return native_get_latency();
710    }
711
712    /**
713     *  Returns the output sample rate in Hz for the specified stream type.
714     */
715    static public int getNativeOutputSampleRate(int streamType) {
716        return native_get_output_sample_rate(streamType);
717    }
718
719    /**
720     * Returns the minimum buffer size required for the successful creation of an AudioTrack
721     * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
722     * guarantee a smooth playback under load, and higher values should be chosen according to
723     * the expected frequency at which the buffer will be refilled with additional data to play.
724     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
725     * to a higher value than the initial source sample rate, be sure to configure the buffer size
726     * based on the highest planned sample rate.
727     * @param sampleRateInHz the source sample rate expressed in Hz.
728     * @param channelConfig describes the configuration of the audio channels.
729     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
730     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
731     * @param audioFormat the format in which the audio data is represented.
732     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
733     *   {@link AudioFormat#ENCODING_PCM_8BIT},
734     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
735     * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
736     *   or {@link #ERROR} if unable to query for output properties,
737     *   or the minimum buffer size expressed in bytes.
738     */
739    static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
740        int channelCount = 0;
741        switch(channelConfig) {
742        case AudioFormat.CHANNEL_OUT_MONO:
743        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
744            channelCount = 1;
745            break;
746        case AudioFormat.CHANNEL_OUT_STEREO:
747        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
748            channelCount = 2;
749            break;
750        default:
751            if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
752                // input channel configuration features unsupported channels
753                loge("getMinBufferSize(): Invalid channel configuration.");
754                return ERROR_BAD_VALUE;
755            } else {
756                channelCount = Integer.bitCount(channelConfig);
757            }
758        }
759
760        if ((audioFormat != AudioFormat.ENCODING_PCM_16BIT)
761            && (audioFormat != AudioFormat.ENCODING_PCM_8BIT)
762            && (audioFormat != AudioFormat.ENCODING_PCM_FLOAT)) {
763            loge("getMinBufferSize(): Invalid audio format.");
764            return ERROR_BAD_VALUE;
765        }
766
767        // sample rate, note these values are subject to change
768        if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
769            loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
770            return ERROR_BAD_VALUE;
771        }
772
773        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
774        if (size <= 0) {
775            loge("getMinBufferSize(): error querying hardware");
776            return ERROR;
777        }
778        else {
779            return size;
780        }
781    }
782
783    /**
784     * Returns the audio session ID.
785     *
786     * @return the ID of the audio session this AudioTrack belongs to.
787     */
788    public int getAudioSessionId() {
789        return mSessionId;
790    }
791
792   /**
793    * Poll for a timestamp on demand.
794    * <p>
795    * If you need to track timestamps during initial warmup or after a routing or mode change,
796    * you should request a new timestamp once per second until the reported timestamps
797    * show that the audio clock is stable.
798    * Thereafter, query for a new timestamp approximately once every 10 seconds to once per minute.
799    * Calling this method more often is inefficient.
800    * It is also counter-productive to call this method more often than recommended,
801    * because the short-term differences between successive timestamp reports are not meaningful.
802    * If you need a high-resolution mapping between frame position and presentation time,
803    * consider implementing that at application level, based on low-resolution timestamps.
804    * <p>
805    * The audio data at the returned position may either already have been
806    * presented, or may have not yet been presented but is committed to be presented.
807    * It is not possible to request the time corresponding to a particular position,
808    * or to request the (fractional) position corresponding to a particular time.
809    * If you need such features, consider implementing them at application level.
810    *
811    * @param timestamp a reference to a non-null AudioTimestamp instance allocated
812    *        and owned by caller.
813    * @return true if a timestamp is available, or false if no timestamp is available.
814    *         If a timestamp if available,
815    *         the AudioTimestamp instance is filled in with a position in frame units, together
816    *         with the estimated time when that frame was presented or is committed to
817    *         be presented.
818    *         In the case that no timestamp is available, any supplied instance is left unaltered.
819    *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
820    *         or during and immediately after a route change.
821    */
822    // Add this text when the "on new timestamp" API is added:
823    //   Use if you need to get the most recent timestamp outside of the event callback handler.
824    public boolean getTimestamp(AudioTimestamp timestamp)
825    {
826        if (timestamp == null) {
827            throw new IllegalArgumentException();
828        }
829        // It's unfortunate, but we have to either create garbage every time or use synchronized
830        long[] longArray = new long[2];
831        int ret = native_get_timestamp(longArray);
832        if (ret != SUCCESS) {
833            return false;
834        }
835        timestamp.framePosition = longArray[0];
836        timestamp.nanoTime = longArray[1];
837        return true;
838    }
839
840
841    //--------------------------------------------------------------------------
842    // Initialization / configuration
843    //--------------------
844    /**
845     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
846     * for each periodic playback head position update.
847     * Notifications will be received in the same thread as the one in which the AudioTrack
848     * instance was created.
849     * @param listener
850     */
851    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
852        setPlaybackPositionUpdateListener(listener, null);
853    }
854
855    /**
856     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
857     * for each periodic playback head position update.
858     * Use this method to receive AudioTrack events in the Handler associated with another
859     * thread than the one in which you created the AudioTrack instance.
860     * @param listener
861     * @param handler the Handler that will receive the event notification messages.
862     */
863    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
864                                                    Handler handler) {
865        if (listener != null) {
866            mEventHandlerDelegate = new NativeEventHandlerDelegate(this, listener, handler);
867        } else {
868            mEventHandlerDelegate = null;
869        }
870    }
871
872
873    private static float clampGainOrLevel(float gainOrLevel) {
874        if (Float.isNaN(gainOrLevel)) {
875            throw new IllegalArgumentException();
876        }
877        if (gainOrLevel < GAIN_MIN) {
878            gainOrLevel = GAIN_MIN;
879        } else if (gainOrLevel > GAIN_MAX) {
880            gainOrLevel = GAIN_MAX;
881        }
882        return gainOrLevel;
883    }
884
885
886     /**
887     * Sets the specified left and right output gain values on the AudioTrack.
888     * <p>Gain values are clamped to the closed interval [0.0, max] where
889     * max is the value of {@link #getMaxVolume}.
890     * A value of 0.0 results in zero gain (silence), and
891     * a value of 1.0 means unity gain (signal unchanged).
892     * The default value is 1.0 meaning unity gain.
893     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
894     * @param leftGain output gain for the left channel.
895     * @param rightGain output gain for the right channel
896     * @return error code or success, see {@link #SUCCESS},
897     *    {@link #ERROR_INVALID_OPERATION}
898     * @deprecated Applications should use {@link #setVolume} instead, as it
899     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
900     */
901    public int setStereoVolume(float leftGain, float rightGain) {
902        if (isRestricted()) {
903            return SUCCESS;
904        }
905        if (mState == STATE_UNINITIALIZED) {
906            return ERROR_INVALID_OPERATION;
907        }
908
909        leftGain = clampGainOrLevel(leftGain);
910        rightGain = clampGainOrLevel(rightGain);
911
912        native_setVolume(leftGain, rightGain);
913
914        return SUCCESS;
915    }
916
917
918    /**
919     * Sets the specified output gain value on all channels of this track.
920     * <p>Gain values are clamped to the closed interval [0.0, max] where
921     * max is the value of {@link #getMaxVolume}.
922     * A value of 0.0 results in zero gain (silence), and
923     * a value of 1.0 means unity gain (signal unchanged).
924     * The default value is 1.0 meaning unity gain.
925     * <p>This API is preferred over {@link #setStereoVolume}, as it
926     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
927     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
928     * @param gain output gain for all channels.
929     * @return error code or success, see {@link #SUCCESS},
930     *    {@link #ERROR_INVALID_OPERATION}
931     */
932    public int setVolume(float gain) {
933        return setStereoVolume(gain, gain);
934    }
935
936
937    /**
938     * Sets the playback sample rate for this track. This sets the sampling rate at which
939     * the audio data will be consumed and played back
940     * (as set by the sampleRateInHz parameter in the
941     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
942     * not the original sampling rate of the
943     * content. For example, setting it to half the sample rate of the content will cause the
944     * playback to last twice as long, but will also result in a pitch shift down by one octave.
945     * The valid sample rate range is from 1 Hz to twice the value returned by
946     * {@link #getNativeOutputSampleRate(int)}.
947     * @param sampleRateInHz the sample rate expressed in Hz
948     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
949     *    {@link #ERROR_INVALID_OPERATION}
950     */
951    public int setPlaybackRate(int sampleRateInHz) {
952        if (mState != STATE_INITIALIZED) {
953            return ERROR_INVALID_OPERATION;
954        }
955        if (sampleRateInHz <= 0) {
956            return ERROR_BAD_VALUE;
957        }
958        return native_set_playback_rate(sampleRateInHz);
959    }
960
961
962    /**
963     * Sets the position of the notification marker.  At most one marker can be active.
964     * @param markerInFrames marker position in wrapping frame units similar to
965     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
966     * To set a marker at a position which would appear as zero due to wraparound,
967     * a workaround is to use a non-zero position near zero, such as -1 or 1.
968     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
969     *  {@link #ERROR_INVALID_OPERATION}
970     */
971    public int setNotificationMarkerPosition(int markerInFrames) {
972        if (mState == STATE_UNINITIALIZED) {
973            return ERROR_INVALID_OPERATION;
974        }
975        return native_set_marker_pos(markerInFrames);
976    }
977
978
979    /**
980     * Sets the period for the periodic notification event.
981     * @param periodInFrames update period expressed in frames
982     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
983     */
984    public int setPositionNotificationPeriod(int periodInFrames) {
985        if (mState == STATE_UNINITIALIZED) {
986            return ERROR_INVALID_OPERATION;
987        }
988        return native_set_pos_update_period(periodInFrames);
989    }
990
991
992    /**
993     * Sets the playback head position.
994     * The track must be stopped or paused for the position to be changed,
995     * and must use the {@link #MODE_STATIC} mode.
996     * @param positionInFrames playback head position expressed in frames
997     * Zero corresponds to start of buffer.
998     * The position must not be greater than the buffer size in frames, or negative.
999     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1000     *    {@link #ERROR_INVALID_OPERATION}
1001     */
1002    public int setPlaybackHeadPosition(int positionInFrames) {
1003        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1004                getPlayState() == PLAYSTATE_PLAYING) {
1005            return ERROR_INVALID_OPERATION;
1006        }
1007        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
1008            return ERROR_BAD_VALUE;
1009        }
1010        return native_set_position(positionInFrames);
1011    }
1012
1013    /**
1014     * Sets the loop points and the loop count. The loop can be infinite.
1015     * Similarly to setPlaybackHeadPosition,
1016     * the track must be stopped or paused for the loop points to be changed,
1017     * and must use the {@link #MODE_STATIC} mode.
1018     * @param startInFrames loop start marker expressed in frames
1019     * Zero corresponds to start of buffer.
1020     * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1021     * @param endInFrames loop end marker expressed in frames
1022     * The total buffer size in frames corresponds to end of buffer.
1023     * The end marker must not be greater than the buffer size in frames.
1024     * For looping, the end marker must not be less than or equal to the start marker,
1025     * but to disable looping
1026     * it is permitted for start marker, end marker, and loop count to all be 0.
1027     * @param loopCount the number of times the loop is looped.
1028     *    A value of -1 means infinite looping, and 0 disables looping.
1029     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1030     *    {@link #ERROR_INVALID_OPERATION}
1031     */
1032    public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1033        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1034                getPlayState() == PLAYSTATE_PLAYING) {
1035            return ERROR_INVALID_OPERATION;
1036        }
1037        if (loopCount == 0) {
1038            ;   // explicitly allowed as an exception to the loop region range check
1039        } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1040                startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1041            return ERROR_BAD_VALUE;
1042        }
1043        return native_set_loop(startInFrames, endInFrames, loopCount);
1044    }
1045
1046    /**
1047     * Sets the initialization state of the instance. This method was originally intended to be used
1048     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
1049     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
1050     * @param state the state of the AudioTrack instance
1051     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
1052     */
1053    @Deprecated
1054    protected void setState(int state) {
1055        mState = state;
1056    }
1057
1058
1059    //---------------------------------------------------------
1060    // Transport control methods
1061    //--------------------
1062    /**
1063     * Starts playing an AudioTrack.
1064     * If track's creation mode is {@link #MODE_STATIC}, you must have called write() prior.
1065     *
1066     * @throws IllegalStateException
1067     */
1068    public void play()
1069    throws IllegalStateException {
1070        if (mState != STATE_INITIALIZED) {
1071            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
1072        }
1073        if (isRestricted()) {
1074            setVolume(0);
1075        }
1076        synchronized(mPlayStateLock) {
1077            native_start();
1078            mPlayState = PLAYSTATE_PLAYING;
1079        }
1080    }
1081
1082    private boolean isRestricted() {
1083        try {
1084            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, mStreamType,
1085                    Process.myUid(), ActivityThread.currentPackageName());
1086            return mode != AppOpsManager.MODE_ALLOWED;
1087        } catch (RemoteException e) {
1088            return false;
1089        }
1090    }
1091
1092    /**
1093     * Stops playing the audio data.
1094     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
1095     * after the last buffer that was written has been played. For an immediate stop, use
1096     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
1097     * back yet.
1098     * @throws IllegalStateException
1099     */
1100    public void stop()
1101    throws IllegalStateException {
1102        if (mState != STATE_INITIALIZED) {
1103            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
1104        }
1105
1106        // stop playing
1107        synchronized(mPlayStateLock) {
1108            native_stop();
1109            mPlayState = PLAYSTATE_STOPPED;
1110        }
1111    }
1112
1113    /**
1114     * Pauses the playback of the audio data. Data that has not been played
1115     * back will not be discarded. Subsequent calls to {@link #play} will play
1116     * this data back. See {@link #flush()} to discard this data.
1117     *
1118     * @throws IllegalStateException
1119     */
1120    public void pause()
1121    throws IllegalStateException {
1122        if (mState != STATE_INITIALIZED) {
1123            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
1124        }
1125        //logd("pause()");
1126
1127        // pause playback
1128        synchronized(mPlayStateLock) {
1129            native_pause();
1130            mPlayState = PLAYSTATE_PAUSED;
1131        }
1132    }
1133
1134
1135    //---------------------------------------------------------
1136    // Audio data supply
1137    //--------------------
1138
1139    /**
1140     * Flushes the audio data currently queued for playback. Any data that has
1141     * not been played back will be discarded.  No-op if not stopped or paused,
1142     * or if the track's creation mode is not {@link #MODE_STREAM}.
1143     */
1144    public void flush() {
1145        if (mState == STATE_INITIALIZED) {
1146            // flush the data in native layer
1147            native_flush();
1148        }
1149
1150    }
1151
1152    /**
1153     * Writes the audio data to the audio sink for playback (streaming mode),
1154     * or copies audio data for later playback (static buffer mode).
1155     * In streaming mode, will block until all data has been written to the audio sink.
1156     * In static buffer mode, copies the data to the buffer starting at offset 0.
1157     * Note that the actual playback of this data might occur after this function
1158     * returns. This function is thread safe with respect to {@link #stop} calls,
1159     * in which case all of the specified data might not be written to the audio sink.
1160     *
1161     * @param audioData the array that holds the data to play.
1162     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
1163     *    starts.
1164     * @param sizeInBytes the number of bytes to read in audioData after the offset.
1165     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
1166     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1167     *    the parameters don't resolve to valid data and indexes.
1168     */
1169
1170    public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
1171
1172        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1173            return ERROR_INVALID_OPERATION;
1174        }
1175
1176        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
1177                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
1178                || (offsetInBytes + sizeInBytes > audioData.length)) {
1179            return ERROR_BAD_VALUE;
1180        }
1181
1182        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
1183                true /*isBlocking*/);
1184
1185        if ((mDataLoadMode == MODE_STATIC)
1186                && (mState == STATE_NO_STATIC_DATA)
1187                && (ret > 0)) {
1188            // benign race with respect to other APIs that read mState
1189            mState = STATE_INITIALIZED;
1190        }
1191
1192        return ret;
1193    }
1194
1195
1196    /**
1197     * Writes the audio data to the audio sink for playback (streaming mode),
1198     * or copies audio data for later playback (static buffer mode).
1199     * In streaming mode, will block until all data has been written to the audio sink.
1200     * In static buffer mode, copies the data to the buffer starting at offset 0.
1201     * Note that the actual playback of this data might occur after this function
1202     * returns. This function is thread safe with respect to {@link #stop} calls,
1203     * in which case all of the specified data might not be written to the audio sink.
1204     *
1205     * @param audioData the array that holds the data to play.
1206     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1207     *     starts.
1208     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1209     * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
1210     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1211     *    the parameters don't resolve to valid data and indexes.
1212     */
1213
1214    public int write(short[] audioData, int offsetInShorts, int sizeInShorts) {
1215
1216        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1217            return ERROR_INVALID_OPERATION;
1218        }
1219
1220        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
1221                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
1222                || (offsetInShorts + sizeInShorts > audioData.length)) {
1223            return ERROR_BAD_VALUE;
1224        }
1225
1226        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat);
1227
1228        if ((mDataLoadMode == MODE_STATIC)
1229                && (mState == STATE_NO_STATIC_DATA)
1230                && (ret > 0)) {
1231            // benign race with respect to other APIs that read mState
1232            mState = STATE_INITIALIZED;
1233        }
1234
1235        return ret;
1236    }
1237
1238
1239    /**
1240     * Writes the audio data to the audio sink for playback (streaming mode),
1241     * or copies audio data for later playback (static buffer mode).
1242     * In static buffer mode, copies the data to the buffer starting at offset 0,
1243     * and the write mode is ignored.
1244     * In streaming mode, the blocking behavior will depend on the write mode.
1245     * <p>
1246     * Note that the actual playback of this data might occur after this function
1247     * returns. This function is thread safe with respect to {@link #stop} calls,
1248     * in which case all of the specified data might not be written to the audio sink.
1249     * <p>
1250     * @param audioData the array that holds the data to play.
1251     *     The implementation does not clip for sample values within the nominal range
1252     *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
1253     *     less than or equal to unity (1.0f), and in the absence of post-processing effects
1254     *     that could add energy, such as reverb.  For the convenience of applications
1255     *     that compute samples using filters with non-unity gain,
1256     *     sample values +3 dB beyond the nominal range are permitted.
1257     *     However such values may eventually be limited or clipped, depending on various gains
1258     *     and later processing in the audio path.  Therefore applications are encouraged
1259     *     to provide samples values within the nominal range.
1260     * @param offsetInFloats the offset, expressed as a number of floats,
1261     *     in audioData where the data to play starts.
1262     * @param sizeInFloats the number of floats to read in audioData after the offset.
1263     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1264     *     effect in static mode.
1265     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1266     *         to the audio sink.
1267     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1268     *     queuing as much audio data for playback as possible without blocking.
1269     * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION}
1270     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1271     *    the parameters don't resolve to valid data and indexes.
1272     */
1273    public int write(float[] audioData, int offsetInFloats, int sizeInFloats,
1274            @WriteMode int writeMode) {
1275
1276        if (mState == STATE_UNINITIALIZED) {
1277            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1278            return ERROR_INVALID_OPERATION;
1279        }
1280
1281        if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
1282            Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
1283            return ERROR_INVALID_OPERATION;
1284        }
1285
1286        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1287            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1288            return ERROR_BAD_VALUE;
1289        }
1290
1291        if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
1292                || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
1293                || (offsetInFloats + sizeInFloats > audioData.length)) {
1294            Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
1295            return ERROR_BAD_VALUE;
1296        }
1297
1298        int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
1299                writeMode == WRITE_BLOCKING);
1300
1301        if ((mDataLoadMode == MODE_STATIC)
1302                && (mState == STATE_NO_STATIC_DATA)
1303                && (ret > 0)) {
1304            // benign race with respect to other APIs that read mState
1305            mState = STATE_INITIALIZED;
1306        }
1307
1308        return ret;
1309    }
1310
1311
1312    /**
1313     * Writes the audio data to the audio sink for playback (streaming mode),
1314     * or copies audio data for later playback (static buffer mode).
1315     * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write
1316     * mode is ignored.
1317     * In streaming mode, the blocking behavior will depend on the write mode.
1318     * @param audioData the buffer that holds the data to play, starting at the position reported
1319     *     by <code>audioData.position()</code>.
1320     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
1321     *     have been advanced to reflect the amount of data that was successfully written to
1322     *     the AudioTrack.
1323     * @param sizeInBytes number of bytes to write.
1324     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
1325     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1326     *     effect in static mode.
1327     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1328     *         to the audio sink.
1329     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1330     *     queuing as much audio data for playback as possible without blocking.
1331     * @return 0 or a positive number of bytes that were written, or
1332     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1333     */
1334    public int write(ByteBuffer audioData, int sizeInBytes,
1335            @WriteMode int writeMode) {
1336
1337        if (mState == STATE_UNINITIALIZED) {
1338            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1339            return ERROR_INVALID_OPERATION;
1340        }
1341
1342        if (mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1343            Log.e(TAG, "AudioTrack.write(ByteBuffer ...) not yet supported for ENCODING_PCM_FLOAT");
1344            return ERROR_INVALID_OPERATION;
1345        }
1346
1347        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1348            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1349            return ERROR_BAD_VALUE;
1350        }
1351
1352        if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
1353            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
1354            return ERROR_BAD_VALUE;
1355        }
1356
1357        int ret = 0;
1358        if (audioData.isDirect()) {
1359            ret = native_write_native_bytes(audioData,
1360                    audioData.position(), sizeInBytes, mAudioFormat,
1361                    writeMode == WRITE_BLOCKING);
1362        } else {
1363            ret = native_write_byte(NioUtils.unsafeArray(audioData),
1364                    NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
1365                    sizeInBytes, mAudioFormat,
1366                    writeMode == WRITE_BLOCKING);
1367        }
1368
1369        if ((mDataLoadMode == MODE_STATIC)
1370                && (mState == STATE_NO_STATIC_DATA)
1371                && (ret > 0)) {
1372            // benign race with respect to other APIs that read mState
1373            mState = STATE_INITIALIZED;
1374        }
1375
1376        if (ret > 0) {
1377            audioData.position(audioData.position() + ret);
1378        }
1379
1380        return ret;
1381    }
1382
1383    /**
1384     * Notifies the native resource to reuse the audio data already loaded in the native
1385     * layer, that is to rewind to start of buffer.
1386     * The track's creation mode must be {@link #MODE_STATIC}.
1387     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1388     *  {@link #ERROR_INVALID_OPERATION}
1389     */
1390    public int reloadStaticData() {
1391        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
1392            return ERROR_INVALID_OPERATION;
1393        }
1394        return native_reload_static();
1395    }
1396
1397    //--------------------------------------------------------------------------
1398    // Audio effects management
1399    //--------------------
1400
1401    /**
1402     * Attaches an auxiliary effect to the audio track. A typical auxiliary
1403     * effect is a reverberation effect which can be applied on any sound source
1404     * that directs a certain amount of its energy to this effect. This amount
1405     * is defined by setAuxEffectSendLevel().
1406     * {@see #setAuxEffectSendLevel(float)}.
1407     * <p>After creating an auxiliary effect (e.g.
1408     * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
1409     * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
1410     * this method to attach the audio track to the effect.
1411     * <p>To detach the effect from the audio track, call this method with a
1412     * null effect id.
1413     *
1414     * @param effectId system wide unique id of the effect to attach
1415     * @return error code or success, see {@link #SUCCESS},
1416     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
1417     */
1418    public int attachAuxEffect(int effectId) {
1419        if (mState == STATE_UNINITIALIZED) {
1420            return ERROR_INVALID_OPERATION;
1421        }
1422        return native_attachAuxEffect(effectId);
1423    }
1424
1425    /**
1426     * Sets the send level of the audio track to the attached auxiliary effect
1427     * {@link #attachAuxEffect(int)}.  Effect levels
1428     * are clamped to the closed interval [0.0, max] where
1429     * max is the value of {@link #getMaxVolume}.
1430     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
1431     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
1432     * this method must be called for the effect to be applied.
1433     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
1434     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
1435     * so an appropriate conversion from linear UI input x to level is:
1436     * x == 0 -&gt; level = 0
1437     * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
1438     *
1439     * @param level linear send level
1440     * @return error code or success, see {@link #SUCCESS},
1441     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
1442     */
1443    public int setAuxEffectSendLevel(float level) {
1444        if (isRestricted()) {
1445            return SUCCESS;
1446        }
1447        if (mState == STATE_UNINITIALIZED) {
1448            return ERROR_INVALID_OPERATION;
1449        }
1450        level = clampGainOrLevel(level);
1451        int err = native_setAuxEffectSendLevel(level);
1452        return err == 0 ? SUCCESS : ERROR;
1453    }
1454
1455    //---------------------------------------------------------
1456    // Interface definitions
1457    //--------------------
1458    /**
1459     * Interface definition for a callback to be invoked when the playback head position of
1460     * an AudioTrack has reached a notification marker or has increased by a certain period.
1461     */
1462    public interface OnPlaybackPositionUpdateListener  {
1463        /**
1464         * Called on the listener to notify it that the previously set marker has been reached
1465         * by the playback head.
1466         */
1467        void onMarkerReached(AudioTrack track);
1468
1469        /**
1470         * Called on the listener to periodically notify it that the playback head has reached
1471         * a multiple of the notification period.
1472         */
1473        void onPeriodicNotification(AudioTrack track);
1474    }
1475
1476
1477    //---------------------------------------------------------
1478    // Inner classes
1479    //--------------------
1480    /**
1481     * Helper class to handle the forwarding of native events to the appropriate listener
1482     * (potentially) handled in a different thread
1483     */
1484    private class NativeEventHandlerDelegate {
1485        private final Handler mHandler;
1486
1487        NativeEventHandlerDelegate(final AudioTrack track,
1488                                   final OnPlaybackPositionUpdateListener listener,
1489                                   Handler handler) {
1490            // find the looper for our new event handler
1491            Looper looper;
1492            if (handler != null) {
1493                looper = handler.getLooper();
1494            } else {
1495                // no given handler, use the looper the AudioTrack was created in
1496                looper = mInitializationLooper;
1497            }
1498
1499            // construct the event handler with this looper
1500            if (looper != null) {
1501                // implement the event handler delegate
1502                mHandler = new Handler(looper) {
1503                    @Override
1504                    public void handleMessage(Message msg) {
1505                        if (track == null) {
1506                            return;
1507                        }
1508                        switch(msg.what) {
1509                        case NATIVE_EVENT_MARKER:
1510                            if (listener != null) {
1511                                listener.onMarkerReached(track);
1512                            }
1513                            break;
1514                        case NATIVE_EVENT_NEW_POS:
1515                            if (listener != null) {
1516                                listener.onPeriodicNotification(track);
1517                            }
1518                            break;
1519                        default:
1520                            loge("Unknown native event type: " + msg.what);
1521                            break;
1522                        }
1523                    }
1524                };
1525            } else {
1526                mHandler = null;
1527            }
1528        }
1529
1530        Handler getHandler() {
1531            return mHandler;
1532        }
1533    }
1534
1535
1536    //---------------------------------------------------------
1537    // Java methods called from the native side
1538    //--------------------
1539    @SuppressWarnings("unused")
1540    private static void postEventFromNative(Object audiotrack_ref,
1541            int what, int arg1, int arg2, Object obj) {
1542        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
1543        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
1544        if (track == null) {
1545            return;
1546        }
1547
1548        NativeEventHandlerDelegate delegate = track.mEventHandlerDelegate;
1549        if (delegate != null) {
1550            Handler handler = delegate.getHandler();
1551            if (handler != null) {
1552                Message m = handler.obtainMessage(what, arg1, arg2, obj);
1553                handler.sendMessage(m);
1554            }
1555        }
1556
1557    }
1558
1559
1560    //---------------------------------------------------------
1561    // Native methods called from the Java side
1562    //--------------------
1563
    // Creates the native AudioTrack peer. audiotrack_this is a weak reference
    // back to this Java object (used by postEventFromNative); sessionId is a
    // single-element in/out array -- presumably receives the audio session id;
    // NOTE(review): confirm against the native implementation.
    private native final int native_setup(Object audiotrack_this,
            int streamType, int sampleRate, int channelMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    // Releases native resources on finalization.
    private native final void native_finalize();

    // Releases the native AudioTrack resources.
    private native final void native_release();

    // Transport control on the native track.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Writes byte PCM data; isBlocking is true when called with WRITE_BLOCKING.
    // Returns the number of bytes written (the ByteBuffer write() advances its
    // position by this amount), or a negative error code.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    // Writes 16-bit PCM data; unlike the byte/float variants there is no
    // blocking flag on this entry point.
    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format);

    // Writes float PCM data; isBlocking is true when called with WRITE_BLOCKING.
    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    // Writes from a direct NIO buffer starting at positionInBytes.
    // Returns the number of bytes written, or a negative error code.
    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    // Rewinds a MODE_STATIC track to the start of its already-loaded buffer.
    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback sample rate in Hz.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    // Notification marker position, in frames.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    // Periodic position-update notification period.
    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    // Playback head position.
    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    // Loop points and repeat count for static-mode playback.
    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not need a track instance.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effect attachment and send level (see attachAuxEffect /
    // setAuxEffectSendLevel above).
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);
1627
1628    //---------------------------------------------------------
1629    // Utility methods
1630    //------------------
1631
    /** Logs a debug message under the class log tag. */
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    /** Logs an error message under the class log tag. */
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
1639
1640}
1641