// AudioTrack.java revision e9111d3067270554fbfe043157bda4a200d15eb4
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.media;
18
19import java.lang.annotation.Retention;
20import java.lang.annotation.RetentionPolicy;
21import java.lang.ref.WeakReference;
22import java.nio.ByteBuffer;
23import java.nio.NioUtils;
24import java.util.Iterator;
25import java.util.Set;
26
27import android.annotation.IntDef;
28import android.app.ActivityThread;
29import android.app.AppOpsManager;
30import android.content.Context;
31import android.os.Handler;
32import android.os.IBinder;
33import android.os.Looper;
34import android.os.Message;
35import android.os.Process;
36import android.os.RemoteException;
37import android.os.ServiceManager;
38import android.util.Log;
39
40import com.android.internal.app.IAppOpsService;
41
42
43/**
44 * The AudioTrack class manages and plays a single audio resource for Java applications.
45 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
46 * achieved by "pushing" the data to the AudioTrack object using one of the
47 *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
48 *  and {@link #write(float[], int, int, int)} methods.
49 *
50 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
51 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
52 * one of the {@code write()} methods. These are blocking and return when the data has been
53 * transferred from the Java layer to the native layer and queued for playback. The streaming
54 * mode is most useful when playing blocks of audio data that for instance are:
55 *
56 * <ul>
57 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
58 *   <li>too big to fit in memory because of the characteristics of the audio data
59 *         (high sampling rate, bits per sample ...)</li>
60 *   <li>received or generated while previously queued audio is playing.</li>
61 * </ul>
62 *
63 * The static mode should be chosen when dealing with short sounds that fit in memory and
64 * that need to be played with the smallest latency possible. The static mode will
65 * therefore be preferred for UI and game sounds that are played often, and with the
66 * smallest overhead possible.
67 *
68 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
69 * The size of this buffer, specified during the construction, determines how long an AudioTrack
70 * can play before running out of data.<br>
71 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
72 * be played from it.<br>
73 * For the streaming mode, data will be written to the audio sink in chunks of
74 * sizes less than or equal to the total buffer size.
75 *
76 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
77 */
public class AudioTrack
{
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Minimum value for sample rate, in Hz (see audioParamCheck) */
    private static final int SAMPLE_RATE_HZ_MIN = 4000;
    /** Maximum value for sample rate, in Hz (see audioParamCheck) */
    private static final int SAMPLE_RATE_HZ_MAX = 48000;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    // Public result codes mirror the shared AudioSystem codes.
    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    // Tag used for log messages.
    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used in {@link #write(ByteBuffer, int, int)}
     */
    public final static int WRITE_BLOCKING = 0;
    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking, to be used in
     * {@link #write(ByteBuffer, int, int)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;
188
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to make sure mPlayState updates are reflecting the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer.
     */
    // Both are set by audioBuffSizeCheck(); the frame count is derived from the
    // byte size and the per-frame size for the configured format/channel count.
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativeEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     */
    private int mSampleRate; // initialized by all constructors
    /**
     * The number of audio output channels (1 is mono, 2 is stereo).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask.
     */
    // Canonical CHANNEL_OUT_* mask passed to the native layer; may differ from
    // mChannelConfiguration, which keeps the caller-supplied value (see audioParamCheck).
    private int mChannels = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    private final AudioAttributes mAttributes;
    /**
     * The way audio is consumed by the audio sink, streaming or static.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current audio channel configuration.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
    /**
     * Audio session ID
     */
    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
    /**
     * Reference to the app-ops service.
     */
    private final IAppOpsService mAppOps;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    private long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;
278
279
280    //--------------------------------------------------------------------------
281    // Constructor, Finalize
282    //--------------------
283    /**
284     * Class constructor.
285     * @param streamType the type of the audio stream. See
286     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
287     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
288     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
289     * @param sampleRateInHz the initial source sample rate expressed in Hz.
290     * @param channelConfig describes the configuration of the audio channels.
291     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
292     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
293     * @param audioFormat the format in which the audio data is represented.
294     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
295     *   {@link AudioFormat#ENCODING_PCM_8BIT},
296     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
297     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
298     *   read from for playback.
299     *   If track's creation mode is {@link #MODE_STREAM}, you can write data into
300     *   this buffer in chunks less than or equal to this size, and it is typical to use
301     *   chunks of 1/2 of the total size to permit double-buffering.
302     *   If the track's creation mode is {@link #MODE_STATIC},
303     *   this is the maximum length sample, or audio clip, that can be played by this instance.
304     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
305     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
306     *   smaller than getMinBufferSize() will result in an initialization failure.
307     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
308     * @throws java.lang.IllegalArgumentException
309     */
310    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
311            int bufferSizeInBytes, int mode)
312    throws IllegalArgumentException {
313        this(streamType, sampleRateInHz, channelConfig, audioFormat,
314                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
315    }
316
317    /**
318     * Class constructor with audio session. Use this constructor when the AudioTrack must be
319     * attached to a particular audio session. The primary use of the audio session ID is to
320     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
321     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
322     * and media players in the same session and not to the output mix.
323     * When an AudioTrack is created without specifying a session, it will create its own session
324     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
325     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
326     * session
327     * with all other media players or audio tracks in the same session, otherwise a new session
328     * will be created for this track if none is supplied.
329     * @param streamType the type of the audio stream. See
330     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
331     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
332     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
333     * @param sampleRateInHz the initial source sample rate expressed in Hz.
334     * @param channelConfig describes the configuration of the audio channels.
335     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
336     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
337     * @param audioFormat the format in which the audio data is represented.
338     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
339     *   {@link AudioFormat#ENCODING_PCM_8BIT},
340     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
341     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
342     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
343     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
344     *   this is the maximum size of the sound that will be played for this instance.
345     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
346     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
347     *   smaller than getMinBufferSize() will result in an initialization failure.
348     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
349     * @param sessionId Id of audio session the AudioTrack must be attached to
350     * @throws java.lang.IllegalArgumentException
351     */
352    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
353            int bufferSizeInBytes, int mode, int sessionId)
354    throws IllegalArgumentException {
355        // mState already == STATE_UNINITIALIZED
356        this((new AudioAttributes.Builder())
357                    .setLegacyStreamType(streamType)
358                    .build(),
359                (new AudioFormat.Builder())
360                    .setChannelMask(channelConfig)
361                    .setEncoding(audioFormat)
362                    .setSampleRate(sampleRateInHz)
363                    .build(),
364                bufferSizeInBytes,
365                mode, sessionId);
366    }
367
368    /**
369     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
370     * @param attributes a non-null {@link AudioAttributes} instance.
371     * @param format a non-null {@link AudioFormat} instance describing the format of the data
372     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
373     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
374     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
375     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
376     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
377     *   this is the maximum size of the sound that will be played for this instance.
378     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
379     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
380     *   smaller than getMinBufferSize() will result in an initialization failure.
381     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
382     * @param sessionId ID of audio session the AudioTrack must be attached to, or
383     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
384     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
385     *   construction.
386     * @throws IllegalArgumentException
387     */
388    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
389            int mode, int sessionId)
390                    throws IllegalArgumentException {
391        // mState already == STATE_UNINITIALIZED
392
393        if (attributes == null) {
394            throw new IllegalArgumentException("Illegal null AudioAttributes");
395        }
396        if (format == null) {
397            throw new IllegalArgumentException("Illegal null AudioFormat");
398        }
399
400        // remember which looper is associated with the AudioTrack instantiation
401        Looper looper;
402        if ((looper = Looper.myLooper()) == null) {
403            looper = Looper.getMainLooper();
404        }
405
406        int rate = 0;
407        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
408        {
409            rate = format.getSampleRate();
410        } else {
411            rate = AudioSystem.getPrimaryOutputSamplingRate();
412            if (rate <= 0) {
413                rate = 44100;
414            }
415        }
416        int channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
417        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0)
418        {
419            channelMask = format.getChannelMask();
420        }
421        int encoding = AudioFormat.ENCODING_DEFAULT;
422        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
423            encoding = format.getEncoding();
424        }
425        audioParamCheck(rate, channelMask, encoding, mode);
426        mStreamType = AudioSystem.STREAM_DEFAULT;
427
428        audioBuffSizeCheck(bufferSizeInBytes);
429
430        mInitializationLooper = looper;
431        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
432        mAppOps = IAppOpsService.Stub.asInterface(b);
433
434        mAttributes = (new AudioAttributes.Builder(attributes).build());
435
436        if (sessionId < 0) {
437            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
438        }
439
440        int[] session = new int[1];
441        session[0] = sessionId;
442        // native initialization
443        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
444                mSampleRate, mChannels, mAudioFormat,
445                mNativeBufferSizeInBytes, mDataLoadMode, session);
446        if (initResult != SUCCESS) {
447            loge("Error code "+initResult+" when initializing AudioTrack.");
448            return; // with mState == STATE_UNINITIALIZED
449        }
450
451        mSessionId = session[0];
452
453        if (mDataLoadMode == MODE_STATIC) {
454            mState = STATE_NO_STATIC_DATA;
455        } else {
456            mState = STATE_INITIALIZED;
457        }
458    }
459
    // mask of all the channels supported by this implementation
    // (front pair, front center, LFE, back pair, back center); used by
    // isMultichannelConfigSupported() to reject masks with other positions set
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER;
469
470    // Convenience method for the constructor's parameter checks.
471    // This is where constructor IllegalArgumentException-s are thrown
472    // postconditions:
473    //    mChannelCount is valid
474    //    mChannels is valid
475    //    mAudioFormat is valid
476    //    mSampleRate is valid
477    //    mDataLoadMode is valid
478    private void audioParamCheck(int sampleRateInHz,
479                                 int channelConfig, int audioFormat, int mode) {
480        //--------------
481        // sample rate, note these values are subject to change
482        if ( (sampleRateInHz < 4000) || (sampleRateInHz > 48000) ) {
483            throw new IllegalArgumentException(sampleRateInHz
484                    + "Hz is not a supported sample rate.");
485        }
486        mSampleRate = sampleRateInHz;
487
488        //--------------
489        // channel config
490        mChannelConfiguration = channelConfig;
491
492        switch (channelConfig) {
493        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
494        case AudioFormat.CHANNEL_OUT_MONO:
495        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
496            mChannelCount = 1;
497            mChannels = AudioFormat.CHANNEL_OUT_MONO;
498            break;
499        case AudioFormat.CHANNEL_OUT_STEREO:
500        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
501            mChannelCount = 2;
502            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
503            break;
504        default:
505            if (!isMultichannelConfigSupported(channelConfig)) {
506                // input channel configuration features unsupported channels
507                throw new IllegalArgumentException("Unsupported channel configuration.");
508            }
509            mChannels = channelConfig;
510            mChannelCount = Integer.bitCount(channelConfig);
511        }
512
513        //--------------
514        // audio format
515        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
516            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
517        }
518
519        if (!AudioFormat.isValidEncoding(audioFormat)) {
520            throw new IllegalArgumentException("Unsupported audio encoding.");
521        }
522        mAudioFormat = audioFormat;
523
524        //--------------
525        // audio load mode
526        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
527                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
528            throw new IllegalArgumentException("Invalid mode.");
529        }
530        mDataLoadMode = mode;
531    }
532
533    /**
534     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
535     * @param channelConfig the mask to validate
536     * @return false if the AudioTrack can't be used with such a mask
537     */
538    private static boolean isMultichannelConfigSupported(int channelConfig) {
539        // check for unsupported channels
540        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
541            loge("Channel configuration features unsupported channels");
542            return false;
543        }
544        // check for unsupported multichannel combinations:
545        // - FL/FR must be present
546        // - L/R channels must be paired (e.g. no single L channel)
547        final int frontPair =
548                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
549        if ((channelConfig & frontPair) != frontPair) {
550                loge("Front channels must be present in multichannel configurations");
551                return false;
552        }
553        final int backPair =
554                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
555        if ((channelConfig & backPair) != 0) {
556            if ((channelConfig & backPair) != backPair) {
557                loge("Rear channels can't be used independently");
558                return false;
559            }
560        }
561        return true;
562    }
563
564
565    // Convenience method for the constructor's audio buffer size check.
566    // preconditions:
567    //    mChannelCount is valid
568    //    mAudioFormat is valid
569    // postcondition:
570    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
571    private void audioBuffSizeCheck(int audioBufferSize) {
572        // NB: this section is only valid with PCM data.
573        //     To update when supporting compressed formats
574        int frameSizeInBytes;
575        if (AudioFormat.isEncodingLinearPcm(mAudioFormat)) {
576            frameSizeInBytes = mChannelCount
577                    * (AudioFormat.getBytesPerSample(mAudioFormat));
578        } else {
579            frameSizeInBytes = 1;
580        }
581        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
582            throw new IllegalArgumentException("Invalid audio buffer size.");
583        }
584
585        mNativeBufferSizeInBytes = audioBufferSize;
586        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
587    }
588
589
590    /**
591     * Releases the native AudioTrack resources.
592     */
593    public void release() {
594        // even though native_release() stops the native AudioTrack, we need to stop
595        // AudioTrack subclasses too.
596        try {
597            stop();
598        } catch(IllegalStateException ise) {
599            // don't raise an exception, we're releasing the resources.
600        }
601        native_release();
602        mState = STATE_UNINITIALIZED;
603    }
604
    @Override
    protected void finalize() {
        // Last-chance cleanup of the native resources; calling release()
        // explicitly is the deterministic way to free them.
        // NOTE(review): does not call super.finalize() — confirm this is intentional.
        native_finalize();
    }
609
610    //--------------------------------------------------------------------------
611    // Getters
612    //--------------------
613    /**
614     * Returns the minimum gain value, which is the constant 0.0.
615     * Gain values less than 0.0 will be clamped to 0.0.
616     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
617     * @return the minimum value, which is the constant 0.0.
618     */
619    static public float getMinVolume() {
620        return GAIN_MIN;
621    }
622
623    /**
624     * Returns the maximum gain value, which is greater than or equal to 1.0.
625     * Gain values greater than the maximum will be clamped to the maximum.
626     * <p>The word "volume" in the API name is historical; this is actually a gain.
627     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
628     * corresponds to a gain of 0 dB (sample values left unmodified).
629     * @return the maximum value, which is greater than or equal to 1.0.
630     */
631    static public float getMaxVolume() {
632        return GAIN_MAX;
633    }
634
    /**
     * Returns the configured audio data sample rate in Hz
     * @return the sample rate stored at construction time
     */
    public int getSampleRate() {
        return mSampleRate;
    }

    /**
     * Returns the current playback rate in Hz.
     * @return the playback rate, queried from the native layer
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }

    /**
     * Returns the configured audio data format. See {@link AudioFormat#ENCODING_PCM_16BIT}
     * and {@link AudioFormat#ENCODING_PCM_8BIT}.
     * @return the encoding validated by the constructor
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }

    /**
     * Returns the type of audio stream this AudioTrack is configured for.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
     */
    public int getStreamType() {
        return mStreamType;
    }

    /**
     * Returns the configured channel configuration.
     * See {@link AudioFormat#CHANNEL_OUT_MONO}
     * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
     * @return the channel configuration as supplied at construction time
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }

    /**
     * Returns the configured number of channels.
     */
    public int getChannelCount() {
        return mChannelCount;
    }

    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     * @see #STATE_UNINITIALIZED
     */
    public int getState() {
        return mState;
    }

    /**
     * Returns the playback state of the AudioTrack instance.
     * @see #PLAYSTATE_STOPPED
     * @see #PLAYSTATE_PAUSED
     * @see #PLAYSTATE_PLAYING
     */
    public int getPlayState() {
        // read under mPlayStateLock so a concurrent state transition is not observed mid-update
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }
707
708    /**
709     *  Returns the "native frame count", derived from the bufferSizeInBytes specified at
710     *  creation time and converted to frame units.
711     *  If track's creation mode is {@link #MODE_STATIC},
712     *  it is equal to the specified bufferSizeInBytes converted to frame units.
713     *  If track's creation mode is {@link #MODE_STREAM},
714     *  it is typically greater than or equal to the specified bufferSizeInBytes converted to frame
715     *  units; it may be rounded up to a larger value if needed by the target device implementation.
716     *  @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
717     *  See {@link AudioManager#getProperty(String)} for key
718     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
719     */
720    @Deprecated
721    protected int getNativeFrameCount() {
722        return native_get_native_frame_count();
723    }
724
725    /**
726     * Returns marker position expressed in frames.
727     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
728     * or zero if marker is disabled.
729     */
730    public int getNotificationMarkerPosition() {
731        return native_get_marker_pos();
732    }
733
734    /**
735     * Returns the notification update period expressed in frames.
736     * Zero means that no position update notifications are being delivered.
737     */
738    public int getPositionNotificationPeriod() {
739        return native_get_pos_update_period();
740    }
741
742    /**
743     * Returns the playback head position expressed in frames.
744     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
745     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
746     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
747     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
748     * It is reset to zero by flush(), reload(), and stop().
749     */
750    public int getPlaybackHeadPosition() {
751        return native_get_position();
752    }
753
754    /**
755     * Returns this track's estimated latency in milliseconds. This includes the latency due
756     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
757     *
758     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
759     * a better solution.
760     * @hide
761     */
762    public int getLatency() {
763        return native_get_latency();
764    }
765
766    /**
767     *  Returns the output sample rate in Hz for the specified stream type.
768     */
769    static public int getNativeOutputSampleRate(int streamType) {
770        return native_get_output_sample_rate(streamType);
771    }
772
773    /**
774     * Returns the minimum buffer size required for the successful creation of an AudioTrack
775     * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
776     * guarantee a smooth playback under load, and higher values should be chosen according to
777     * the expected frequency at which the buffer will be refilled with additional data to play.
778     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
779     * to a higher value than the initial source sample rate, be sure to configure the buffer size
780     * based on the highest planned sample rate.
781     * @param sampleRateInHz the source sample rate expressed in Hz.
782     * @param channelConfig describes the configuration of the audio channels.
783     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
784     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
785     * @param audioFormat the format in which the audio data is represented.
786     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
787     *   {@link AudioFormat#ENCODING_PCM_8BIT},
788     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
789     * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
790     *   or {@link #ERROR} if unable to query for output properties,
791     *   or the minimum buffer size expressed in bytes.
792     */
793    static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
794        int channelCount = 0;
795        switch(channelConfig) {
796        case AudioFormat.CHANNEL_OUT_MONO:
797        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
798            channelCount = 1;
799            break;
800        case AudioFormat.CHANNEL_OUT_STEREO:
801        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
802            channelCount = 2;
803            break;
804        default:
805            if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
806                // input channel configuration features unsupported channels
807                loge("getMinBufferSize(): Invalid channel configuration.");
808                return ERROR_BAD_VALUE;
809            } else {
810                channelCount = Integer.bitCount(channelConfig);
811            }
812        }
813
814        if (!AudioFormat.isValidEncoding(audioFormat)) {
815            loge("getMinBufferSize(): Invalid audio format.");
816            return ERROR_BAD_VALUE;
817        }
818
819        // sample rate, note these values are subject to change
820        if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
821            loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
822            return ERROR_BAD_VALUE;
823        }
824
825        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
826        if (size <= 0) {
827            loge("getMinBufferSize(): error querying hardware");
828            return ERROR;
829        }
830        else {
831            return size;
832        }
833    }
834
835    /**
836     * Returns the audio session ID.
837     *
838     * @return the ID of the audio session this AudioTrack belongs to.
839     */
840    public int getAudioSessionId() {
841        return mSessionId;
842    }
843
844   /**
845    * Poll for a timestamp on demand.
846    * <p>
847    * If you need to track timestamps during initial warmup or after a routing or mode change,
848    * you should request a new timestamp once per second until the reported timestamps
849    * show that the audio clock is stable.
850    * Thereafter, query for a new timestamp approximately once every 10 seconds to once per minute.
851    * Calling this method more often is inefficient.
852    * It is also counter-productive to call this method more often than recommended,
853    * because the short-term differences between successive timestamp reports are not meaningful.
854    * If you need a high-resolution mapping between frame position and presentation time,
855    * consider implementing that at application level, based on low-resolution timestamps.
856    * <p>
857    * The audio data at the returned position may either already have been
858    * presented, or may have not yet been presented but is committed to be presented.
859    * It is not possible to request the time corresponding to a particular position,
860    * or to request the (fractional) position corresponding to a particular time.
861    * If you need such features, consider implementing them at application level.
862    *
863    * @param timestamp a reference to a non-null AudioTimestamp instance allocated
864    *        and owned by caller.
865    * @return true if a timestamp is available, or false if no timestamp is available.
866    *         If a timestamp if available,
867    *         the AudioTimestamp instance is filled in with a position in frame units, together
868    *         with the estimated time when that frame was presented or is committed to
869    *         be presented.
870    *         In the case that no timestamp is available, any supplied instance is left unaltered.
871    *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
872    *         or during and immediately after a route change.
873    */
874    // Add this text when the "on new timestamp" API is added:
875    //   Use if you need to get the most recent timestamp outside of the event callback handler.
876    public boolean getTimestamp(AudioTimestamp timestamp)
877    {
878        if (timestamp == null) {
879            throw new IllegalArgumentException();
880        }
881        // It's unfortunate, but we have to either create garbage every time or use synchronized
882        long[] longArray = new long[2];
883        int ret = native_get_timestamp(longArray);
884        if (ret != SUCCESS) {
885            return false;
886        }
887        timestamp.framePosition = longArray[0];
888        timestamp.nanoTime = longArray[1];
889        return true;
890    }
891
892
893    //--------------------------------------------------------------------------
894    // Initialization / configuration
895    //--------------------
896    /**
897     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
898     * for each periodic playback head position update.
899     * Notifications will be received in the same thread as the one in which the AudioTrack
900     * instance was created.
901     * @param listener
902     */
903    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
904        setPlaybackPositionUpdateListener(listener, null);
905    }
906
907    /**
908     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
909     * for each periodic playback head position update.
910     * Use this method to receive AudioTrack events in the Handler associated with another
911     * thread than the one in which you created the AudioTrack instance.
912     * @param listener
913     * @param handler the Handler that will receive the event notification messages.
914     */
915    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
916                                                    Handler handler) {
917        if (listener != null) {
918            mEventHandlerDelegate = new NativeEventHandlerDelegate(this, listener, handler);
919        } else {
920            mEventHandlerDelegate = null;
921        }
922    }
923
924
925    private static float clampGainOrLevel(float gainOrLevel) {
926        if (Float.isNaN(gainOrLevel)) {
927            throw new IllegalArgumentException();
928        }
929        if (gainOrLevel < GAIN_MIN) {
930            gainOrLevel = GAIN_MIN;
931        } else if (gainOrLevel > GAIN_MAX) {
932            gainOrLevel = GAIN_MAX;
933        }
934        return gainOrLevel;
935    }
936
937
938     /**
939     * Sets the specified left and right output gain values on the AudioTrack.
940     * <p>Gain values are clamped to the closed interval [0.0, max] where
941     * max is the value of {@link #getMaxVolume}.
942     * A value of 0.0 results in zero gain (silence), and
943     * a value of 1.0 means unity gain (signal unchanged).
944     * The default value is 1.0 meaning unity gain.
945     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
946     * @param leftGain output gain for the left channel.
947     * @param rightGain output gain for the right channel
948     * @return error code or success, see {@link #SUCCESS},
949     *    {@link #ERROR_INVALID_OPERATION}
950     * @deprecated Applications should use {@link #setVolume} instead, as it
951     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
952     */
953    public int setStereoVolume(float leftGain, float rightGain) {
954        if (isRestricted()) {
955            return SUCCESS;
956        }
957        if (mState == STATE_UNINITIALIZED) {
958            return ERROR_INVALID_OPERATION;
959        }
960
961        leftGain = clampGainOrLevel(leftGain);
962        rightGain = clampGainOrLevel(rightGain);
963
964        native_setVolume(leftGain, rightGain);
965
966        return SUCCESS;
967    }
968
969
970    /**
971     * Sets the specified output gain value on all channels of this track.
972     * <p>Gain values are clamped to the closed interval [0.0, max] where
973     * max is the value of {@link #getMaxVolume}.
974     * A value of 0.0 results in zero gain (silence), and
975     * a value of 1.0 means unity gain (signal unchanged).
976     * The default value is 1.0 meaning unity gain.
977     * <p>This API is preferred over {@link #setStereoVolume}, as it
978     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
979     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
980     * @param gain output gain for all channels.
981     * @return error code or success, see {@link #SUCCESS},
982     *    {@link #ERROR_INVALID_OPERATION}
983     */
984    public int setVolume(float gain) {
985        return setStereoVolume(gain, gain);
986    }
987
988
989    /**
990     * Sets the playback sample rate for this track. This sets the sampling rate at which
991     * the audio data will be consumed and played back
992     * (as set by the sampleRateInHz parameter in the
993     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
994     * not the original sampling rate of the
995     * content. For example, setting it to half the sample rate of the content will cause the
996     * playback to last twice as long, but will also result in a pitch shift down by one octave.
997     * The valid sample rate range is from 1 Hz to twice the value returned by
998     * {@link #getNativeOutputSampleRate(int)}.
999     * @param sampleRateInHz the sample rate expressed in Hz
1000     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1001     *    {@link #ERROR_INVALID_OPERATION}
1002     */
1003    public int setPlaybackRate(int sampleRateInHz) {
1004        if (mState != STATE_INITIALIZED) {
1005            return ERROR_INVALID_OPERATION;
1006        }
1007        if (sampleRateInHz <= 0) {
1008            return ERROR_BAD_VALUE;
1009        }
1010        return native_set_playback_rate(sampleRateInHz);
1011    }
1012
1013
1014    /**
1015     * Sets the position of the notification marker.  At most one marker can be active.
1016     * @param markerInFrames marker position in wrapping frame units similar to
1017     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
1018     * To set a marker at a position which would appear as zero due to wraparound,
1019     * a workaround is to use a non-zero position near zero, such as -1 or 1.
1020     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1021     *  {@link #ERROR_INVALID_OPERATION}
1022     */
1023    public int setNotificationMarkerPosition(int markerInFrames) {
1024        if (mState == STATE_UNINITIALIZED) {
1025            return ERROR_INVALID_OPERATION;
1026        }
1027        return native_set_marker_pos(markerInFrames);
1028    }
1029
1030
1031    /**
1032     * Sets the period for the periodic notification event.
1033     * @param periodInFrames update period expressed in frames
1034     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
1035     */
1036    public int setPositionNotificationPeriod(int periodInFrames) {
1037        if (mState == STATE_UNINITIALIZED) {
1038            return ERROR_INVALID_OPERATION;
1039        }
1040        return native_set_pos_update_period(periodInFrames);
1041    }
1042
1043
1044    /**
1045     * Sets the playback head position.
1046     * The track must be stopped or paused for the position to be changed,
1047     * and must use the {@link #MODE_STATIC} mode.
1048     * @param positionInFrames playback head position expressed in frames
1049     * Zero corresponds to start of buffer.
1050     * The position must not be greater than the buffer size in frames, or negative.
1051     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1052     *    {@link #ERROR_INVALID_OPERATION}
1053     */
1054    public int setPlaybackHeadPosition(int positionInFrames) {
1055        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1056                getPlayState() == PLAYSTATE_PLAYING) {
1057            return ERROR_INVALID_OPERATION;
1058        }
1059        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
1060            return ERROR_BAD_VALUE;
1061        }
1062        return native_set_position(positionInFrames);
1063    }
1064
1065    /**
1066     * Sets the loop points and the loop count. The loop can be infinite.
1067     * Similarly to setPlaybackHeadPosition,
1068     * the track must be stopped or paused for the loop points to be changed,
1069     * and must use the {@link #MODE_STATIC} mode.
1070     * @param startInFrames loop start marker expressed in frames
1071     * Zero corresponds to start of buffer.
1072     * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1073     * @param endInFrames loop end marker expressed in frames
1074     * The total buffer size in frames corresponds to end of buffer.
1075     * The end marker must not be greater than the buffer size in frames.
1076     * For looping, the end marker must not be less than or equal to the start marker,
1077     * but to disable looping
1078     * it is permitted for start marker, end marker, and loop count to all be 0.
1079     * @param loopCount the number of times the loop is looped.
1080     *    A value of -1 means infinite looping, and 0 disables looping.
1081     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1082     *    {@link #ERROR_INVALID_OPERATION}
1083     */
1084    public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1085        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1086                getPlayState() == PLAYSTATE_PLAYING) {
1087            return ERROR_INVALID_OPERATION;
1088        }
1089        if (loopCount == 0) {
1090            ;   // explicitly allowed as an exception to the loop region range check
1091        } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1092                startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1093            return ERROR_BAD_VALUE;
1094        }
1095        return native_set_loop(startInFrames, endInFrames, loopCount);
1096    }
1097
1098    /**
1099     * Sets the initialization state of the instance. This method was originally intended to be used
1100     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
1101     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
1102     * @param state the state of the AudioTrack instance
1103     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
1104     */
1105    @Deprecated
1106    protected void setState(int state) {
1107        mState = state;
1108    }
1109
1110
1111    //---------------------------------------------------------
1112    // Transport control methods
1113    //--------------------
1114    /**
1115     * Starts playing an AudioTrack.
1116     * If track's creation mode is {@link #MODE_STATIC}, you must have called write() prior.
1117     *
1118     * @throws IllegalStateException
1119     */
1120    public void play()
1121    throws IllegalStateException {
1122        if (mState != STATE_INITIALIZED) {
1123            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
1124        }
1125        if (isRestricted()) {
1126            setVolume(0);
1127        }
1128        synchronized(mPlayStateLock) {
1129            native_start();
1130            mPlayState = PLAYSTATE_PLAYING;
1131        }
1132    }
1133
    // Returns true when the OP_PLAY_AUDIO app op disallows playback for this
    // process/package and the track's legacy stream type.
    private boolean isRestricted() {
        try {
            final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType);
            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage,
                    Process.myUid(), ActivityThread.currentPackageName());
            return mode != AppOpsManager.MODE_ALLOWED;
        } catch (RemoteException e) {
            // AppOps service unreachable: err on the side of allowing playback.
            return false;
        }
    }
1144
1145    /**
1146     * Stops playing the audio data.
1147     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
1148     * after the last buffer that was written has been played. For an immediate stop, use
1149     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
1150     * back yet.
1151     * @throws IllegalStateException
1152     */
1153    public void stop()
1154    throws IllegalStateException {
1155        if (mState != STATE_INITIALIZED) {
1156            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
1157        }
1158
1159        // stop playing
1160        synchronized(mPlayStateLock) {
1161            native_stop();
1162            mPlayState = PLAYSTATE_STOPPED;
1163        }
1164    }
1165
1166    /**
1167     * Pauses the playback of the audio data. Data that has not been played
1168     * back will not be discarded. Subsequent calls to {@link #play} will play
1169     * this data back. See {@link #flush()} to discard this data.
1170     *
1171     * @throws IllegalStateException
1172     */
1173    public void pause()
1174    throws IllegalStateException {
1175        if (mState != STATE_INITIALIZED) {
1176            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
1177        }
1178        //logd("pause()");
1179
1180        // pause playback
1181        synchronized(mPlayStateLock) {
1182            native_pause();
1183            mPlayState = PLAYSTATE_PAUSED;
1184        }
1185    }
1186
1187
1188    //---------------------------------------------------------
1189    // Audio data supply
1190    //--------------------
1191
1192    /**
1193     * Flushes the audio data currently queued for playback. Any data that has
1194     * not been played back will be discarded.  No-op if not stopped or paused,
1195     * or if the track's creation mode is not {@link #MODE_STREAM}.
1196     */
1197    public void flush() {
1198        if (mState == STATE_INITIALIZED) {
1199            // flush the data in native layer
1200            native_flush();
1201        }
1202
1203    }
1204
1205    /**
1206     * Writes the audio data to the audio sink for playback (streaming mode),
1207     * or copies audio data for later playback (static buffer mode).
1208     * In streaming mode, will block until all data has been written to the audio sink.
1209     * In static buffer mode, copies the data to the buffer starting at offset 0.
1210     * Note that the actual playback of this data might occur after this function
1211     * returns. This function is thread safe with respect to {@link #stop} calls,
1212     * in which case all of the specified data might not be written to the audio sink.
1213     *
1214     * @param audioData the array that holds the data to play.
1215     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
1216     *    starts.
1217     * @param sizeInBytes the number of bytes to read in audioData after the offset.
1218     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
1219     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1220     *    the parameters don't resolve to valid data and indexes, or
1221     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1222     *    needs to be recreated.
1223     */
1224
1225    public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
1226
1227        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1228            return ERROR_INVALID_OPERATION;
1229        }
1230
1231        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
1232                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
1233                || (offsetInBytes + sizeInBytes > audioData.length)) {
1234            return ERROR_BAD_VALUE;
1235        }
1236
1237        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
1238                true /*isBlocking*/);
1239
1240        if ((mDataLoadMode == MODE_STATIC)
1241                && (mState == STATE_NO_STATIC_DATA)
1242                && (ret > 0)) {
1243            // benign race with respect to other APIs that read mState
1244            mState = STATE_INITIALIZED;
1245        }
1246
1247        return ret;
1248    }
1249
1250
1251    /**
1252     * Writes the audio data to the audio sink for playback (streaming mode),
1253     * or copies audio data for later playback (static buffer mode).
1254     * In streaming mode, will block until all data has been written to the audio sink.
1255     * In static buffer mode, copies the data to the buffer starting at offset 0.
1256     * Note that the actual playback of this data might occur after this function
1257     * returns. This function is thread safe with respect to {@link #stop} calls,
1258     * in which case all of the specified data might not be written to the audio sink.
1259     *
1260     * @param audioData the array that holds the data to play.
1261     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1262     *     starts.
1263     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1264     * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
1265     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1266     *    the parameters don't resolve to valid data and indexes.
1267     */
1268
1269    public int write(short[] audioData, int offsetInShorts, int sizeInShorts) {
1270
1271        if (mState == STATE_UNINITIALIZED || mAudioFormat != AudioFormat.ENCODING_PCM_16BIT) {
1272            return ERROR_INVALID_OPERATION;
1273        }
1274
1275        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
1276                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
1277                || (offsetInShorts + sizeInShorts > audioData.length)) {
1278            return ERROR_BAD_VALUE;
1279        }
1280
1281        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat);
1282
1283        if ((mDataLoadMode == MODE_STATIC)
1284                && (mState == STATE_NO_STATIC_DATA)
1285                && (ret > 0)) {
1286            // benign race with respect to other APIs that read mState
1287            mState = STATE_INITIALIZED;
1288        }
1289
1290        return ret;
1291    }
1292
1293
1294    /**
1295     * Writes the audio data to the audio sink for playback (streaming mode),
1296     * or copies audio data for later playback (static buffer mode).
1297     * In static buffer mode, copies the data to the buffer starting at offset 0,
1298     * and the write mode is ignored.
1299     * In streaming mode, the blocking behavior will depend on the write mode.
1300     * <p>
1301     * Note that the actual playback of this data might occur after this function
1302     * returns. This function is thread safe with respect to {@link #stop} calls,
1303     * in which case all of the specified data might not be written to the audio sink.
1304     * <p>
1305     * @param audioData the array that holds the data to play.
1306     *     The implementation does not clip for sample values within the nominal range
1307     *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
1308     *     less than or equal to unity (1.0f), and in the absence of post-processing effects
1309     *     that could add energy, such as reverb.  For the convenience of applications
1310     *     that compute samples using filters with non-unity gain,
1311     *     sample values +3 dB beyond the nominal range are permitted.
1312     *     However such values may eventually be limited or clipped, depending on various gains
1313     *     and later processing in the audio path.  Therefore applications are encouraged
1314     *     to provide samples values within the nominal range.
1315     * @param offsetInFloats the offset, expressed as a number of floats,
1316     *     in audioData where the data to play starts.
1317     * @param sizeInFloats the number of floats to read in audioData after the offset.
1318     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1319     *     effect in static mode.
1320     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1321     *         to the audio sink.
1322     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1323     *     queuing as much audio data for playback as possible without blocking.
1324     * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION}
1325     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1326     *    the parameters don't resolve to valid data and indexes.
1327     */
1328    public int write(float[] audioData, int offsetInFloats, int sizeInFloats,
1329            @WriteMode int writeMode) {
1330
1331        if (mState == STATE_UNINITIALIZED) {
1332            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1333            return ERROR_INVALID_OPERATION;
1334        }
1335
1336        if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
1337            Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
1338            return ERROR_INVALID_OPERATION;
1339        }
1340
1341        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1342            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1343            return ERROR_BAD_VALUE;
1344        }
1345
1346        if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
1347                || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
1348                || (offsetInFloats + sizeInFloats > audioData.length)) {
1349            Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
1350            return ERROR_BAD_VALUE;
1351        }
1352
1353        int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
1354                writeMode == WRITE_BLOCKING);
1355
1356        if ((mDataLoadMode == MODE_STATIC)
1357                && (mState == STATE_NO_STATIC_DATA)
1358                && (ret > 0)) {
1359            // benign race with respect to other APIs that read mState
1360            mState = STATE_INITIALIZED;
1361        }
1362
1363        return ret;
1364    }
1365
1366
1367    /**
1368     * Writes the audio data to the audio sink for playback (streaming mode),
1369     * or copies audio data for later playback (static buffer mode).
1370     * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write
1371     * mode is ignored.
1372     * In streaming mode, the blocking behavior will depend on the write mode.
1373     * @param audioData the buffer that holds the data to play, starting at the position reported
1374     *     by <code>audioData.position()</code>.
1375     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
1376     *     have been advanced to reflect the amount of data that was successfully written to
1377     *     the AudioTrack.
1378     * @param sizeInBytes number of bytes to write.
1379     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
1380     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1381     *     effect in static mode.
1382     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1383     *         to the audio sink.
1384     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1385     *     queuing as much audio data for playback as possible without blocking.
1386     * @return 0 or a positive number of bytes that were written, or
1387     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1388     */
1389    public int write(ByteBuffer audioData, int sizeInBytes,
1390            @WriteMode int writeMode) {
1391
1392        if (mState == STATE_UNINITIALIZED) {
1393            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1394            return ERROR_INVALID_OPERATION;
1395        }
1396
1397        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1398            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1399            return ERROR_BAD_VALUE;
1400        }
1401
1402        if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
1403            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
1404            return ERROR_BAD_VALUE;
1405        }
1406
1407        int ret = 0;
1408        if (audioData.isDirect()) {
1409            ret = native_write_native_bytes(audioData,
1410                    audioData.position(), sizeInBytes, mAudioFormat,
1411                    writeMode == WRITE_BLOCKING);
1412        } else {
1413            ret = native_write_byte(NioUtils.unsafeArray(audioData),
1414                    NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
1415                    sizeInBytes, mAudioFormat,
1416                    writeMode == WRITE_BLOCKING);
1417        }
1418
1419        if ((mDataLoadMode == MODE_STATIC)
1420                && (mState == STATE_NO_STATIC_DATA)
1421                && (ret > 0)) {
1422            // benign race with respect to other APIs that read mState
1423            mState = STATE_INITIALIZED;
1424        }
1425
1426        if (ret > 0) {
1427            audioData.position(audioData.position() + ret);
1428        }
1429
1430        return ret;
1431    }
1432
1433    /**
1434     * Notifies the native resource to reuse the audio data already loaded in the native
1435     * layer, that is to rewind to start of buffer.
1436     * The track's creation mode must be {@link #MODE_STATIC}.
1437     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1438     *  {@link #ERROR_INVALID_OPERATION}
1439     */
1440    public int reloadStaticData() {
1441        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
1442            return ERROR_INVALID_OPERATION;
1443        }
1444        return native_reload_static();
1445    }
1446
1447    //--------------------------------------------------------------------------
1448    // Audio effects management
1449    //--------------------
1450
1451    /**
1452     * Attaches an auxiliary effect to the audio track. A typical auxiliary
1453     * effect is a reverberation effect which can be applied on any sound source
1454     * that directs a certain amount of its energy to this effect. This amount
1455     * is defined by setAuxEffectSendLevel().
1456     * {@see #setAuxEffectSendLevel(float)}.
1457     * <p>After creating an auxiliary effect (e.g.
1458     * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
1459     * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
1460     * this method to attach the audio track to the effect.
1461     * <p>To detach the effect from the audio track, call this method with a
1462     * null effect id.
1463     *
1464     * @param effectId system wide unique id of the effect to attach
1465     * @return error code or success, see {@link #SUCCESS},
1466     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
1467     */
1468    public int attachAuxEffect(int effectId) {
1469        if (mState == STATE_UNINITIALIZED) {
1470            return ERROR_INVALID_OPERATION;
1471        }
1472        return native_attachAuxEffect(effectId);
1473    }
1474
1475    /**
1476     * Sets the send level of the audio track to the attached auxiliary effect
1477     * {@link #attachAuxEffect(int)}.  Effect levels
1478     * are clamped to the closed interval [0.0, max] where
1479     * max is the value of {@link #getMaxVolume}.
1480     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
1481     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
1482     * this method must be called for the effect to be applied.
1483     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
1484     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
1485     * so an appropriate conversion from linear UI input x to level is:
1486     * x == 0 -&gt; level = 0
1487     * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
1488     *
1489     * @param level linear send level
1490     * @return error code or success, see {@link #SUCCESS},
1491     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
1492     */
1493    public int setAuxEffectSendLevel(float level) {
1494        if (isRestricted()) {
1495            return SUCCESS;
1496        }
1497        if (mState == STATE_UNINITIALIZED) {
1498            return ERROR_INVALID_OPERATION;
1499        }
1500        level = clampGainOrLevel(level);
1501        int err = native_setAuxEffectSendLevel(level);
1502        return err == 0 ? SUCCESS : ERROR;
1503    }
1504
1505    //---------------------------------------------------------
1506    // Interface definitions
1507    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         * @param track the AudioTrack whose marker was reached
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         * @param track the AudioTrack that issued the periodic notification
         */
        void onPeriodicNotification(AudioTrack track);
    }
1525
1526    //---------------------------------------------------------
1527    // Inner classes
1528    //--------------------
1529    /**
1530     * Helper class to handle the forwarding of native events to the appropriate listener
1531     * (potentially) handled in a different thread
1532     */
1533    private class NativeEventHandlerDelegate {
1534        private final Handler mHandler;
1535
1536        NativeEventHandlerDelegate(final AudioTrack track,
1537                                   final OnPlaybackPositionUpdateListener listener,
1538                                   Handler handler) {
1539            // find the looper for our new event handler
1540            Looper looper;
1541            if (handler != null) {
1542                looper = handler.getLooper();
1543            } else {
1544                // no given handler, use the looper the AudioTrack was created in
1545                looper = mInitializationLooper;
1546            }
1547
1548            // construct the event handler with this looper
1549            if (looper != null) {
1550                // implement the event handler delegate
1551                mHandler = new Handler(looper) {
1552                    @Override
1553                    public void handleMessage(Message msg) {
1554                        if (track == null) {
1555                            return;
1556                        }
1557                        switch(msg.what) {
1558                        case NATIVE_EVENT_MARKER:
1559                            if (listener != null) {
1560                                listener.onMarkerReached(track);
1561                            }
1562                            break;
1563                        case NATIVE_EVENT_NEW_POS:
1564                            if (listener != null) {
1565                                listener.onPeriodicNotification(track);
1566                            }
1567                            break;
1568                        default:
1569                            loge("Unknown native event type: " + msg.what);
1570                            break;
1571                        }
1572                    }
1573                };
1574            } else {
1575                mHandler = null;
1576            }
1577        }
1578
1579        Handler getHandler() {
1580            return mHandler;
1581        }
1582    }
1583
1584
1585    //---------------------------------------------------------
1586    // Java methods called from the native side
1587    //--------------------
1588    @SuppressWarnings("unused")
1589    private static void postEventFromNative(Object audiotrack_ref,
1590            int what, int arg1, int arg2, Object obj) {
1591        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
1592        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
1593        if (track == null) {
1594            return;
1595        }
1596
1597        NativeEventHandlerDelegate delegate = track.mEventHandlerDelegate;
1598        if (delegate != null) {
1599            Handler handler = delegate.getHandler();
1600            if (handler != null) {
1601                Message m = handler.obtainMessage(what, arg1, arg2, obj);
1602                handler.sendMessage(m);
1603            }
1604        }
1605
1606    }
1607
1608
    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int sampleRate, int channelMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    private native final void native_finalize();

    private native final void native_release();

    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Writes byte[] PCM data; see write(byte[], int, int). Also used by the
    // ByteBuffer write path for non-direct (heap-backed) buffers.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    // Writes short[] PCM data; note this variant takes no blocking-mode flag.
    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format);

    // Writes float[] PCM data; see write(float[], int, int, int).
    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    // Writes from a direct ByteBuffer, starting at positionInBytes within it.
    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not require a constructed track.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);
1680
1681    //---------------------------------------------------------
1682    // Utility methods
1683    //------------------
1684
    // Debug-level logging helper, using the class-wide TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
1688
    // Error-level logging helper, using the class-wide TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
1692
1693}
1694