// AudioTrack.java revision 30d794360f35592554403922bcc07835fea4737b
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.media;
18
19import java.lang.annotation.Retention;
20import java.lang.annotation.RetentionPolicy;
21import java.lang.ref.WeakReference;
22import java.lang.Math;
23import java.nio.ByteBuffer;
24import java.nio.ByteOrder;
25import java.nio.NioUtils;
26import java.util.Collection;
27
28import android.annotation.IntDef;
29import android.annotation.NonNull;
30import android.annotation.SystemApi;
31import android.app.ActivityThread;
32import android.app.AppOpsManager;
33import android.content.Context;
34import android.os.Handler;
35import android.os.IBinder;
36import android.os.Looper;
37import android.os.Message;
38import android.os.Process;
39import android.os.RemoteException;
40import android.os.ServiceManager;
41import android.util.ArrayMap;
42import android.util.Log;
43
44import com.android.internal.app.IAppOpsService;
45
46
47/**
48 * The AudioTrack class manages and plays a single audio resource for Java applications.
49 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
50 * achieved by "pushing" the data to the AudioTrack object using one of the
51 *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
52 *  and {@link #write(float[], int, int, int)} methods.
53 *
54 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
55 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
56 * one of the {@code write()} methods. These are blocking and return when the data has been
57 * transferred from the Java layer to the native layer and queued for playback. The streaming
58 * mode is most useful when playing blocks of audio data that for instance are:
59 *
60 * <ul>
61 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
62 *   <li>too big to fit in memory because of the characteristics of the audio data
63 *         (high sampling rate, bits per sample ...)</li>
64 *   <li>received or generated while previously queued audio is playing.</li>
65 * </ul>
66 *
67 * The static mode should be chosen when dealing with short sounds that fit in memory and
68 * that need to be played with the smallest latency possible. The static mode will
69 * therefore be preferred for UI and game sounds that are played often, and with the
70 * smallest overhead possible.
71 *
72 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
73 * The size of this buffer, specified during the construction, determines how long an AudioTrack
74 * can play before running out of data.<br>
75 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
76 * be played from it.<br>
77 * For the streaming mode, data will be written to the audio sink in chunks of
78 * sizes less than or equal to the total buffer size.
79 *
80 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
81 */
82public class AudioTrack
83{
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Minimum value for the source sample rate, in Hz. */
    private static final int SAMPLE_RATE_HZ_MIN = 4000;
    /** Maximum value for the source sample rate, in Hz. */
    private static final int SAMPLE_RATE_HZ_MAX = 96000;

    /** Maximum value for the AudioTrack channel count. */
    private static final int CHANNEL_COUNT_MAX = 8;

    // Play states reported by getPlayState(); values mirror the OpenSL ES play states.
    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    // Public error codes, aliased from AudioSystem so callers need only this class.
    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    /**
     * Event id denotes when the routing changes.
     */
    private final static int NATIVE_EVENT_ROUTING_CHANGE = 1000;


    // Tag used for logging by this class.
    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used in {@link #write(ByteBuffer, int, int)}.
     */
    public final static int WRITE_BLOCKING = 0;
    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking, to be used in
     * {@link #write(ByteBuffer, int, int)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;
209
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of {@link #STATE_UNINITIALIZED}, {@link #STATE_INITIALIZED},
     * or {@link #STATE_NO_STATIC_DATA}.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of {@link #PLAYSTATE_STOPPED}, {@link #PLAYSTATE_PAUSED},
     * or {@link #PLAYSTATE_PLAYING}.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to make sure mPlayState updates are reflecting the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current native audio buffer frame count use {@link #getNativeFrameCount()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     */
    private int mSampleRate; // initialized by all constructors
    /**
     * The number of audio output channels (1 is mono, 2 is stereo).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannels = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    // Immutable copy of the attributes this track was created with.
    private final AudioAttributes mAttributes;
    /**
     * The way audio is consumed by the audio sink, streaming or static.
     * One of {@link #MODE_STREAM} or {@link #MODE_STATIC}.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The current audio channel index configuration (if specified).
     * Zero when no channel index mask has been set.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
    /**
     * Audio session ID; updated after native initialization with the assigned session.
     */
    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
    /**
     * Reference to the app-ops service.
     */
    private final IAppOpsService mAppOps;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    private long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;
315
316
317    //--------------------------------------------------------------------------
318    // Constructor, Finalize
319    //--------------------
320    /**
321     * Class constructor.
322     * @param streamType the type of the audio stream. See
323     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
324     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
325     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
326     * @param sampleRateInHz the initial source sample rate expressed in Hz.
327     * @param channelConfig describes the configuration of the audio channels.
328     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
329     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
330     * @param audioFormat the format in which the audio data is represented.
331     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
332     *   {@link AudioFormat#ENCODING_PCM_8BIT},
333     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
334     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
335     *   read from for playback. This should be a multiple of the frame size in bytes.
336     *   <p> If the track's creation mode is {@link #MODE_STATIC},
337     *   this is the maximum length sample, or audio clip, that can be played by this instance.
338     *   <p> If the track's creation mode is {@link #MODE_STREAM},
339     *   this should be the desired buffer size
340     *   for the <code>AudioTrack</code> to satisfy the application's
341     *   natural latency requirements.
342     *   If <code>bufferSizeInBytes</code> is less than the
343     *   minimum buffer size for the output sink, it is automatically increased to the minimum
344     *   buffer size.
345     *   The method {@link #getNativeFrameCount()} returns the
346     *   actual size in frames of the native buffer created, which
347     *   determines the frequency to write
348     *   to the streaming <code>AudioTrack</code> to avoid underrun.
349     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
350     * @throws java.lang.IllegalArgumentException
351     */
352    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
353            int bufferSizeInBytes, int mode)
354    throws IllegalArgumentException {
355        this(streamType, sampleRateInHz, channelConfig, audioFormat,
356                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
357    }
358
359    /**
360     * Class constructor with audio session. Use this constructor when the AudioTrack must be
361     * attached to a particular audio session. The primary use of the audio session ID is to
362     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
363     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
364     * and media players in the same session and not to the output mix.
365     * When an AudioTrack is created without specifying a session, it will create its own session
366     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
367     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
368     * session
369     * with all other media players or audio tracks in the same session, otherwise a new session
370     * will be created for this track if none is supplied.
371     * @param streamType the type of the audio stream. See
372     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
373     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
374     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
375     * @param sampleRateInHz the initial source sample rate expressed in Hz.
376     * @param channelConfig describes the configuration of the audio channels.
377     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
378     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
379     * @param audioFormat the format in which the audio data is represented.
380     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
381     *   {@link AudioFormat#ENCODING_PCM_8BIT},
382     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
383     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
384     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
385     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
386     *   this is the maximum size of the sound that will be played for this instance.
387     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
388     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
389     *   smaller than getMinBufferSize() will result in an initialization failure.
390     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
391     * @param sessionId Id of audio session the AudioTrack must be attached to
392     * @throws java.lang.IllegalArgumentException
393     */
394    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
395            int bufferSizeInBytes, int mode, int sessionId)
396    throws IllegalArgumentException {
397        // mState already == STATE_UNINITIALIZED
398        this((new AudioAttributes.Builder())
399                    .setLegacyStreamType(streamType)
400                    .build(),
401                (new AudioFormat.Builder())
402                    .setChannelMask(channelConfig)
403                    .setEncoding(audioFormat)
404                    .setSampleRate(sampleRateInHz)
405                    .build(),
406                bufferSizeInBytes,
407                mode, sessionId);
408    }
409
    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException if any argument fails validation; on a native setup
     *   failure the instance is left with mState == STATE_UNINITIALIZED instead.
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes");
        }
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioTrack instantiation;
        // fall back to the main looper when the creating thread has none.
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // Resolve the sample rate: use the format's rate if one was explicitly set,
        // otherwise the primary output rate, with 44100 Hz as a last-resort default.
        int rate = 0;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
        {
            rate = format.getSampleRate();
        } else {
            rate = AudioSystem.getPrimaryOutputSamplingRate();
            if (rate <= 0) {
                rate = 44100;
            }
        }
        // A channel index mask and a channel position mask may be set independently.
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates the arguments and, on success, initializes mSampleRate, mChannelCount,
        // mChannels, mAudioFormat and the related fields (throws IllegalArgumentException
        // otherwise).
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;
        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        // Keep an immutable copy of the caller's attributes.
        mAttributes = (new AudioAttributes.Builder(attributes).build());

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // session is an in/out parameter: native_setup writes the session id actually
        // assigned to the track back into session[0].
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                mSampleRate, mChannels, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSessionId = session[0];

        // A static track is not fully usable until its data has been supplied.
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }
509
510    /**
511     * Builder class for {@link AudioTrack} objects.
512     * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
513     * attributes and audio format parameters, you indicate which of those vary from the default
514     * behavior on the device.
515     * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
516     * parameters, to be used by a new <code>AudioTrack</code> instance:
517     *
518     * <pre class="prettyprint">
519     * AudioTrack player = new AudioTrack.Builder()
520     *         .setAudioAttributes(new AudioAttributes.Builder()
521     *                  .setUsage(AudioAttributes.USAGE_ALARM)
522     *                  .setContentType(CONTENT_TYPE_MUSIC)
523     *                  .build())
524     *         .setAudioFormat(new AudioFormat.Builder()
525     *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     *                 .setSampleRate(44100)
527     *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
528     *                 .build())
529     *         .setBufferSize(minBuffSize)
530     *         .build();
531     * </pre>
532     * <p>
533     * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
534     * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
535     * <br>If the audio format is not specified or is incomplete, its sample rate will be the
536     * default output sample rate of the device (see
537     * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be
538     * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
539     * {@link AudioFormat#ENCODING_PCM_16BIT}.
540     * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
541     * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
542     * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
543     * <code>MODE_STREAM</code> will be used.
544     * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
545     * be generated.
546     */
547    public static class Builder {
548        private AudioAttributes mAttributes;
549        private AudioFormat mFormat;
550        private int mBufferSizeInBytes;
551        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
552        private int mMode = MODE_STREAM;
553
554        /**
555         * Constructs a new Builder with the default values as described above.
556         */
557        public Builder() {
558        }
559
560        /**
561         * Sets the {@link AudioAttributes}.
562         * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
563         *     data to be played.
564         * @return the same Builder instance.
565         * @throws IllegalArgumentException
566         */
567        public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
568                throws IllegalArgumentException {
569            if (attributes == null) {
570                throw new IllegalArgumentException("Illegal null AudioAttributes argument");
571            }
572            // keep reference, we only copy the data when building
573            mAttributes = attributes;
574            return this;
575        }
576
577        /**
578         * Sets the format of the audio data to be played by the {@link AudioTrack}.
579         * See {@link AudioFormat.Builder} for configuring the audio format parameters such
580         * as encoding, channel mask and sample rate.
581         * @param format a non-null {@link AudioFormat} instance.
582         * @return the same Builder instance.
583         * @throws IllegalArgumentException
584         */
585        public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
586                throws IllegalArgumentException {
587            if (format == null) {
588                throw new IllegalArgumentException("Illegal null AudioFormat argument");
589            }
590            // keep reference, we only copy the data when building
591            mFormat = format;
592            return this;
593        }
594
595        /**
596         * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
597         * If using the {@link AudioTrack} in streaming mode
598         * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
599         * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
600         * the minimum required buffer size for the successful creation of an AudioTrack instance
601         * in streaming mode. Using values smaller than <code>getMinBufferSize()</code> will result
602         * in an exception when trying to build the <code>AudioTrack</code>.
603         * <br>If using the <code>AudioTrack</code> in static mode (see
604         * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
605         * played by this instance.
606         * @param bufferSizeInBytes
607         * @return the same Builder instance.
608         * @throws IllegalArgumentException
609         */
610        public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
611                throws IllegalArgumentException {
612            if (bufferSizeInBytes <= 0) {
613                throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
614            }
615            mBufferSizeInBytes = bufferSizeInBytes;
616            return this;
617        }
618
619        /**
620         * Sets the mode under which buffers of audio data are transferred from the
621         * {@link AudioTrack} to the framework.
622         * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
623         * @return the same Builder instance.
624         * @throws IllegalArgumentException
625         */
626        public @NonNull Builder setTransferMode(@TransferMode int mode)
627                throws IllegalArgumentException {
628            switch(mode) {
629                case MODE_STREAM:
630                case MODE_STATIC:
631                    mMode = mode;
632                    break;
633                default:
634                    throw new IllegalArgumentException("Invalid transfer mode " + mode);
635            }
636            return this;
637        }
638
639        /**
640         * Sets the session ID the {@link AudioTrack} will be attached to.
641         * @param sessionId a strictly positive ID number retrieved from another
642         *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
643         *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
644         *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
645         * @return the same Builder instance.
646         * @throws IllegalArgumentException
647         */
648        public @NonNull Builder setSessionId(int sessionId)
649                throws IllegalArgumentException {
650            if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
651                throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
652            }
653            mSessionId = sessionId;
654            return this;
655        }
656
657        /**
658         * Builds an {@link AudioTrack} instance initialized with all the parameters set
659         * on this <code>Builder</code>.
660         * @return a new {@link AudioTrack} instance.
661         * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
662         *     were incompatible, or if they are not supported by the device.
663         */
664        public @NonNull AudioTrack build() throws UnsupportedOperationException {
665            if (mAttributes == null) {
666                mAttributes = new AudioAttributes.Builder()
667                        .setUsage(AudioAttributes.USAGE_MEDIA)
668                        .build();
669            }
670            if (mFormat == null) {
671                mFormat = new AudioFormat.Builder()
672                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
673                        .setSampleRate(AudioSystem.getPrimaryOutputSamplingRate())
674                        .setEncoding(AudioFormat.ENCODING_DEFAULT)
675                        .build();
676            }
677            try {
678                // If the buffer size is not specified in streaming mode,
679                // use a single frame for the buffer size and let the
680                // native code figure out the minimum buffer size.
681                if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
682                    mBufferSizeInBytes = mFormat.getChannelCount()
683                            * mFormat.getBytesPerSample(mFormat.getEncoding());
684                }
685                return new AudioTrack(mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId);
686            } catch (IllegalArgumentException e) {
687                throw new UnsupportedOperationException(e.getMessage());
688            }
689        }
690    }
691
    // Mask of all the channel *position* masks supported by this implementation.
    // Channel index masks are validated separately in audioParamCheck().
    // Note: does not include every AudioFormat.CHANNEL_OUT_* value — only the
    // positions this class can route to the native layer.
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
703
    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannels is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        if (sampleRateInHz < SAMPLE_RATE_HZ_MIN || sampleRateInHz > SAMPLE_RATE_HZ_MAX) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannels = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            // CHANNEL_INVALID with a non-zero index mask means the caller is using a
            // channel *index* configuration only: leave the count at 0 so it gets
            // derived from the index mask below.
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannels = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            // Only the low CHANNEL_COUNT_MAX index bits are accepted.
            final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // When both a position mask and an index mask were supplied, they must
                // agree on the number of channels.
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            // ENCODING_DEFAULT resolves to 16-bit PCM.
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isValidEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // Mode must be STREAM or STATIC, and non-linear-PCM (compressed) encodings are
        // only accepted in STREAM mode.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
786
787    /**
788     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
789     * @param channelConfig the mask to validate
790     * @return false if the AudioTrack can't be used with such a mask
791     */
792    private static boolean isMultichannelConfigSupported(int channelConfig) {
793        // check for unsupported channels
794        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
795            loge("Channel configuration features unsupported channels");
796            return false;
797        }
798        final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
799        if (channelCount > CHANNEL_COUNT_MAX) {
800            loge("Channel configuration contains too many channels " +
801                    channelCount + ">" + CHANNEL_COUNT_MAX);
802            return false;
803        }
804        // check for unsupported multichannel combinations:
805        // - FL/FR must be present
806        // - L/R channels must be paired (e.g. no single L channel)
807        final int frontPair =
808                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
809        if ((channelConfig & frontPair) != frontPair) {
810                loge("Front channels must be present in multichannel configurations");
811                return false;
812        }
813        final int backPair =
814                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
815        if ((channelConfig & backPair) != 0) {
816            if ((channelConfig & backPair) != backPair) {
817                loge("Rear channels can't be used independently");
818                return false;
819            }
820        }
821        final int sidePair =
822                AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
823        if ((channelConfig & sidePair) != 0
824                && (channelConfig & sidePair) != sidePair) {
825            loge("Side channels can't be used independently");
826            return false;
827        }
828        return true;
829    }
830
831
832    // Convenience method for the constructor's audio buffer size check.
833    // preconditions:
834    //    mChannelCount is valid
835    //    mAudioFormat is valid
836    // postcondition:
837    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
838    private void audioBuffSizeCheck(int audioBufferSize) {
839        // NB: this section is only valid with PCM data.
840        //     To update when supporting compressed formats
841        int frameSizeInBytes;
842        if (AudioFormat.isEncodingLinearPcm(mAudioFormat)) {
843            frameSizeInBytes = mChannelCount
844                    * (AudioFormat.getBytesPerSample(mAudioFormat));
845        } else {
846            frameSizeInBytes = 1;
847        }
848        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
849            throw new IllegalArgumentException("Invalid audio buffer size.");
850        }
851
852        mNativeBufferSizeInBytes = audioBufferSize;
853        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
854    }
855
856
857    /**
858     * Releases the native AudioTrack resources.
859     */
860    public void release() {
861        // even though native_release() stops the native AudioTrack, we need to stop
862        // AudioTrack subclasses too.
863        try {
864            stop();
865        } catch(IllegalStateException ise) {
866            // don't raise an exception, we're releasing the resources.
867        }
868        native_release();
869        mState = STATE_UNINITIALIZED;
870    }
871
872    @Override
873    protected void finalize() {
874        native_finalize();
875    }
876
877    //--------------------------------------------------------------------------
878    // Getters
879    //--------------------
880    /**
881     * Returns the minimum gain value, which is the constant 0.0.
882     * Gain values less than 0.0 will be clamped to 0.0.
883     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
884     * @return the minimum value, which is the constant 0.0.
885     */
886    static public float getMinVolume() {
887        return GAIN_MIN;
888    }
889
890    /**
891     * Returns the maximum gain value, which is greater than or equal to 1.0.
892     * Gain values greater than the maximum will be clamped to the maximum.
893     * <p>The word "volume" in the API name is historical; this is actually a gain.
894     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
895     * corresponds to a gain of 0 dB (sample values left unmodified).
896     * @return the maximum value, which is greater than or equal to 1.0.
897     */
898    static public float getMaxVolume() {
899        return GAIN_MAX;
900    }
901
    /**
     * Returns the configured audio data sample rate in Hz.
     * @return the source sample rate validated at construction time; distinct from the
     *     current playback rate reported by {@link #getPlaybackRate()}.
     */
    public int getSampleRate() {
        return mSampleRate;
    }
908
    /**
     * Returns the current playback sample rate rate in Hz.
     * @return the rate queried live from the native layer (may differ from the
     *     configured source rate after {@link #setPlaybackRate(int)}).
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }
915
916    /**
917     * Returns the current playback settings.
918     * See {@link #setPlaybackSettings(PlaybackSettings)} to set playback settings
919     * @return current {@link PlaybackSettings}.
920     * @throws IllegalStateException if track is not initialized.
921     */
922    public @NonNull PlaybackSettings getPlaybackSettings() {
923        float[] floatArray = new float[2];
924        int[] intArray = new int[2];
925        native_get_playback_settings(floatArray, intArray);
926        return new PlaybackSettings()
927                .setSpeed(floatArray[0])
928                .setPitch(floatArray[1])
929                .setAudioFallbackMode(intArray[0])
930                .setAudioStretchMode(intArray[1]);
931    }
932
    /**
     * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
     * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @return the encoding resolved at construction (ENCODING_DEFAULT is normalized
     *     to ENCODING_PCM_16BIT during parameter checking).
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }
940
    /**
     * Returns the type of audio stream this AudioTrack is configured for.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
     * @return the stream type captured at construction time.
     */
    public int getStreamType() {
        return mStreamType;
    }
951
    /**
     * Returns the configured channel position mask.
     * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
     * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
     * This method may return {@link AudioFormat#CHANNEL_INVALID} if
     * a channel index mask is used. Consider
     * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
     * which contains both the channel position mask and the channel index mask.
     * @return the raw channel position mask captured at construction.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }
964
965    /**
966     * Returns the configured <code>AudioTrack</code> format.
967     * @return an {@link AudioFormat} containing the
968     * <code>AudioTrack</code> parameters at the time of configuration.
969     */
970    public @NonNull AudioFormat getFormat() {
971        AudioFormat.Builder builder = new AudioFormat.Builder()
972            .setSampleRate(mSampleRate)
973            .setEncoding(mAudioFormat);
974        if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
975            builder.setChannelMask(mChannelConfiguration);
976        }
977        if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
978            builder.setChannelIndexMask(mChannelIndexMask);
979        }
980        return builder.build();
981    }
982
    /**
     * Returns the configured number of channels.
     * @return the channel count derived during construction from the position mask
     *     and/or the channel index mask.
     */
    public int getChannelCount() {
        return mChannelCount;
    }
989
    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     * @see #STATE_UNINITIALIZED
     * @return one of the STATE_* constants above.
     */
    public int getState() {
        return mState;
    }
1001
    /**
     * Returns the playback state of the AudioTrack instance.
     * @see #PLAYSTATE_STOPPED
     * @see #PLAYSTATE_PAUSED
     * @see #PLAYSTATE_PLAYING
     * @return one of the PLAYSTATE_* constants above.
     */
    public int getPlayState() {
        // Read under mPlayStateLock for a consistent view against play()/pause()/stop().
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }
1013
    /**
     *  Returns the "native frame count" of the <code>AudioTrack</code> buffer.
     *  <p> If the track's creation mode is {@link #MODE_STATIC},
     *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
     *  A static track's native frame count will not change.
     *  <p> If the track's creation mode is {@link #MODE_STREAM},
     *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
     *  For streaming tracks, this value may be rounded up to a larger value if needed by
     *  the target output sink, and
     *  if the track is subsequently routed to a different output sink, the native
     *  frame count may enlarge to accommodate.
     *  See also {@link AudioManager#getProperty(String)} for key
     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     *  @return current size in frames of the audio track buffer.
     *  @throws IllegalStateException
     */
    public int getNativeFrameCount() throws IllegalStateException {
        // Queried live from the native track; not cached because it can grow (see above).
        return native_get_native_frame_count();
    }
1033
    /**
     * Returns marker position expressed in frames.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }
1042
    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     * @return the period last set via {@link #setPositionNotificationPeriod(int)},
     *     as reported by the native layer.
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }
1050
    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
     * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
     * the total number of frames played since reset,
     * <i>not</i> the current offset within the buffer.
     * @return the wrapping frame counter described above.
     */
    public int getPlaybackHeadPosition() {
        return native_get_position();
    }
1065
    /**
     * Returns this track's estimated latency in milliseconds. This includes the latency due
     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
     *
     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
     * a better solution.
     * @hide
     * @return estimated latency in milliseconds, as reported by the native layer.
     */
    public int getLatency() {
        return native_get_latency();
    }
1077
1078    /**
1079     *  Returns the output sample rate in Hz for the specified stream type.
1080     */
1081    static public int getNativeOutputSampleRate(int streamType) {
1082        return native_get_output_sample_rate(streamType);
1083    }
1084
1085    /**
1086     * Returns the minimum buffer size required for the successful creation of an AudioTrack
1087     * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
1088     * guarantee a smooth playback under load, and higher values should be chosen according to
1089     * the expected frequency at which the buffer will be refilled with additional data to play.
1090     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
1091     * to a higher value than the initial source sample rate, be sure to configure the buffer size
1092     * based on the highest planned sample rate.
1093     * @param sampleRateInHz the source sample rate expressed in Hz.
1094     * @param channelConfig describes the configuration of the audio channels.
1095     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
1096     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
1097     * @param audioFormat the format in which the audio data is represented.
1098     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
1099     *   {@link AudioFormat#ENCODING_PCM_8BIT},
1100     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1101     * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
1102     *   or {@link #ERROR} if unable to query for output properties,
1103     *   or the minimum buffer size expressed in bytes.
1104     */
1105    static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
1106        int channelCount = 0;
1107        switch(channelConfig) {
1108        case AudioFormat.CHANNEL_OUT_MONO:
1109        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
1110            channelCount = 1;
1111            break;
1112        case AudioFormat.CHANNEL_OUT_STEREO:
1113        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
1114            channelCount = 2;
1115            break;
1116        default:
1117            if (!isMultichannelConfigSupported(channelConfig)) {
1118                loge("getMinBufferSize(): Invalid channel configuration.");
1119                return ERROR_BAD_VALUE;
1120            } else {
1121                channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1122            }
1123        }
1124
1125        if (!AudioFormat.isValidEncoding(audioFormat)) {
1126            loge("getMinBufferSize(): Invalid audio format.");
1127            return ERROR_BAD_VALUE;
1128        }
1129
1130        // sample rate, note these values are subject to change
1131        if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
1132            loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
1133            return ERROR_BAD_VALUE;
1134        }
1135
1136        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
1137        if (size <= 0) {
1138            loge("getMinBufferSize(): error querying hardware");
1139            return ERROR;
1140        }
1141        else {
1142            return size;
1143        }
1144    }
1145
    /**
     * Returns the audio session ID.
     *
     * @return the ID of the audio session this AudioTrack belongs to.
     */
    public int getAudioSessionId() {
        return mSessionId;
    }
1154
1155   /**
1156    * Poll for a timestamp on demand.
1157    * <p>
1158    * If you need to track timestamps during initial warmup or after a routing or mode change,
1159    * you should request a new timestamp once per second until the reported timestamps
1160    * show that the audio clock is stable.
1161    * Thereafter, query for a new timestamp approximately once every 10 seconds to once per minute.
1162    * Calling this method more often is inefficient.
1163    * It is also counter-productive to call this method more often than recommended,
1164    * because the short-term differences between successive timestamp reports are not meaningful.
1165    * If you need a high-resolution mapping between frame position and presentation time,
1166    * consider implementing that at application level, based on low-resolution timestamps.
1167    * <p>
1168    * The audio data at the returned position may either already have been
1169    * presented, or may have not yet been presented but is committed to be presented.
1170    * It is not possible to request the time corresponding to a particular position,
1171    * or to request the (fractional) position corresponding to a particular time.
1172    * If you need such features, consider implementing them at application level.
1173    *
1174    * @param timestamp a reference to a non-null AudioTimestamp instance allocated
1175    *        and owned by caller.
1176    * @return true if a timestamp is available, or false if no timestamp is available.
1177    *         If a timestamp if available,
1178    *         the AudioTimestamp instance is filled in with a position in frame units, together
1179    *         with the estimated time when that frame was presented or is committed to
1180    *         be presented.
1181    *         In the case that no timestamp is available, any supplied instance is left unaltered.
1182    *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
1183    *         or during and immediately after a route change.
1184    */
1185    // Add this text when the "on new timestamp" API is added:
1186    //   Use if you need to get the most recent timestamp outside of the event callback handler.
1187    public boolean getTimestamp(AudioTimestamp timestamp)
1188    {
1189        if (timestamp == null) {
1190            throw new IllegalArgumentException();
1191        }
1192        // It's unfortunate, but we have to either create garbage every time or use synchronized
1193        long[] longArray = new long[2];
1194        int ret = native_get_timestamp(longArray);
1195        if (ret != SUCCESS) {
1196            return false;
1197        }
1198        timestamp.framePosition = longArray[0];
1199        timestamp.nanoTime = longArray[1];
1200        return true;
1201    }
1202
1203
1204    //--------------------------------------------------------------------------
1205    // Initialization / configuration
1206    //--------------------
    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
     * Notifications will be received in the same thread as the one in which the AudioTrack
     * instance was created.
     * @param listener the listener to notify, or null to clear the current listener.
     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Delegates to the two-argument overload with a null handler (creator's thread).
        setPlaybackPositionUpdateListener(listener, null);
    }
1217
1218    /**
1219     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
1220     * for each periodic playback head position update.
1221     * Use this method to receive AudioTrack events in the Handler associated with another
1222     * thread than the one in which you created the AudioTrack instance.
1223     * @param listener
1224     * @param handler the Handler that will receive the event notification messages.
1225     */
1226    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
1227                                                    Handler handler) {
1228        if (listener != null) {
1229            mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
1230        } else {
1231            mEventHandlerDelegate = null;
1232        }
1233    }
1234
1235
1236    private static float clampGainOrLevel(float gainOrLevel) {
1237        if (Float.isNaN(gainOrLevel)) {
1238            throw new IllegalArgumentException();
1239        }
1240        if (gainOrLevel < GAIN_MIN) {
1241            gainOrLevel = GAIN_MIN;
1242        } else if (gainOrLevel > GAIN_MAX) {
1243            gainOrLevel = GAIN_MAX;
1244        }
1245        return gainOrLevel;
1246    }
1247
1248
1249     /**
1250     * Sets the specified left and right output gain values on the AudioTrack.
1251     * <p>Gain values are clamped to the closed interval [0.0, max] where
1252     * max is the value of {@link #getMaxVolume}.
1253     * A value of 0.0 results in zero gain (silence), and
1254     * a value of 1.0 means unity gain (signal unchanged).
1255     * The default value is 1.0 meaning unity gain.
1256     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1257     * @param leftGain output gain for the left channel.
1258     * @param rightGain output gain for the right channel
1259     * @return error code or success, see {@link #SUCCESS},
1260     *    {@link #ERROR_INVALID_OPERATION}
1261     * @deprecated Applications should use {@link #setVolume} instead, as it
1262     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
1263     */
1264    public int setStereoVolume(float leftGain, float rightGain) {
1265        if (isRestricted()) {
1266            return SUCCESS;
1267        }
1268        if (mState == STATE_UNINITIALIZED) {
1269            return ERROR_INVALID_OPERATION;
1270        }
1271
1272        leftGain = clampGainOrLevel(leftGain);
1273        rightGain = clampGainOrLevel(rightGain);
1274
1275        native_setVolume(leftGain, rightGain);
1276
1277        return SUCCESS;
1278    }
1279
1280
    /**
     * Sets the specified output gain value on all channels of this track.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>This API is preferred over {@link #setStereoVolume}, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param gain output gain for all channels.
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setVolume(float gain) {
        // Applies the same gain to both channels; clamping happens in setStereoVolume.
        return setStereoVolume(gain, gain);
    }
1298
1299
    /**
     * Sets the playback sample rate for this track. This sets the sampling rate at which
     * the audio data will be consumed and played back
     * (as set by the sampleRateInHz parameter in the
     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
     * not the original sampling rate of the
     * content. For example, setting it to half the sample rate of the content will cause the
     * playback to last twice as long, but will also result in a pitch shift down by one octave.
     * The valid sample rate range is from 1 Hz to twice the value returned by
     * {@link #getNativeOutputSampleRate(int)}.
     * Use {@link #setPlaybackSettings(PlaybackSettings)} for speed control.
     * @param sampleRateInHz the sample rate expressed in Hz
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setPlaybackRate(int sampleRateInHz) {
        // Guard order matters: uninitialized state and a bad rate return distinct codes.
        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        if (sampleRateInHz <= 0) {
            return ERROR_BAD_VALUE;
        }
        return native_set_playback_rate(sampleRateInHz);
    }
1324
1325
1326    /**
1327     * Sets the playback settings.
1328     * This method returns failure if it cannot apply the playback settings.
1329     * One possible cause is that the parameters for speed or pitch are out of range.
1330     * Another possible cause is that the <code>AudioTrack</code> is streaming
1331     * (see {@link #MODE_STREAM}) and the
1332     * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
1333     * on configuration must be larger than the speed multiplied by the minimum size
1334     * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
1335     * @param settings see {@link PlaybackSettings}. In particular,
1336     * speed, pitch, and audio mode should be set.
1337     * @throws IllegalArgumentException if the settings are invalid or not accepted.
1338     * @throws IllegalStateException if track is not initialized.
1339     */
1340    public void setPlaybackSettings(@NonNull PlaybackSettings settings) {
1341        if (settings == null) {
1342            throw new IllegalArgumentException("settings is null");
1343        }
1344        float[] floatArray;
1345        int[] intArray;
1346        try {
1347            floatArray = new float[] {
1348                    settings.getSpeed(),
1349                    settings.getPitch(),
1350            };
1351            intArray = new int[] {
1352                    settings.getAudioFallbackMode(),
1353                    settings.getAudioStretchMode(),
1354            };
1355        } catch (IllegalStateException e) {
1356            throw new IllegalArgumentException(e);
1357        }
1358        native_set_playback_settings(floatArray, intArray);
1359    }
1360
1361
    /**
     * Sets the position of the notification marker.  At most one marker can be active.
     * @param markerInFrames marker position in wrapping frame units similar to
     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
     * To set a marker at a position which would appear as zero due to wraparound,
     * a workaround is to use a non-zero position near zero, such as -1 or 1.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *  {@link #ERROR_INVALID_OPERATION}
     */
    public int setNotificationMarkerPosition(int markerInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        // Value validation (ERROR_BAD_VALUE) is performed by the native layer.
        return native_set_marker_pos(markerInFrames);
    }
1377
1378
1379    /**
1380     * Sets the period for the periodic notification event.
1381     * @param periodInFrames update period expressed in frames.
1382     * Zero period means no position updates.  A negative period is not allowed.
1383     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
1384     */
1385    public int setPositionNotificationPeriod(int periodInFrames) {
1386        if (mState == STATE_UNINITIALIZED) {
1387            return ERROR_INVALID_OPERATION;
1388        }
1389        return native_set_pos_update_period(periodInFrames);
1390    }
1391
1392
1393    /**
1394     * Sets the playback head position within the static buffer.
1395     * The track must be stopped or paused for the position to be changed,
1396     * and must use the {@link #MODE_STATIC} mode.
1397     * @param positionInFrames playback head position within buffer, expressed in frames.
1398     * Zero corresponds to start of buffer.
1399     * The position must not be greater than the buffer size in frames, or negative.
1400     * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
1401     * the position values have different meanings.
1402     * <br>
1403     * If looping is currently enabled and the new position is greater than or equal to the
1404     * loop end marker, the behavior varies by API level: for API level 22 and above,
1405     * the looping is first disabled and then the position is set.
1406     * For earlier API levels, the behavior is unspecified.
1407     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1408     *    {@link #ERROR_INVALID_OPERATION}
1409     */
1410    public int setPlaybackHeadPosition(int positionInFrames) {
1411        if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1412                getPlayState() == PLAYSTATE_PLAYING) {
1413            return ERROR_INVALID_OPERATION;
1414        }
1415        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
1416            return ERROR_BAD_VALUE;
1417        }
1418        return native_set_position(positionInFrames);
1419    }
1420
1421    /**
1422     * Sets the loop points and the loop count. The loop can be infinite.
1423     * Similarly to setPlaybackHeadPosition,
1424     * the track must be stopped or paused for the loop points to be changed,
1425     * and must use the {@link #MODE_STATIC} mode.
1426     * @param startInFrames loop start marker expressed in frames.
1427     * Zero corresponds to start of buffer.
1428     * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1429     * @param endInFrames loop end marker expressed in frames.
1430     * The total buffer size in frames corresponds to end of buffer.
1431     * The end marker must not be greater than the buffer size in frames.
1432     * For looping, the end marker must not be less than or equal to the start marker,
1433     * but to disable looping
1434     * it is permitted for start marker, end marker, and loop count to all be 0.
1435     * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
1436     * If the loop period (endInFrames - startInFrames) is too small for the implementation to
1437     * support,
1438     * {@link #ERROR_BAD_VALUE} is returned.
1439     * The loop range is the interval [startInFrames, endInFrames).
1440     * <br>
1441     * For API level 22 and above, the position is left unchanged,
1442     * unless it is greater than or equal to the loop end marker, in which case
1443     * it is forced to the loop start marker.
1444     * For earlier API levels, the effect on position is unspecified.
1445     * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
1446     *    A value of -1 means infinite looping, and 0 disables looping.
1447     *    A value of positive N means to "loop" (go back) N times.  For example,
1448     *    a value of one means to play the region two times in total.
1449     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1450     *    {@link #ERROR_INVALID_OPERATION}
1451     */
1452    public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1453        if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1454                getPlayState() == PLAYSTATE_PLAYING) {
1455            return ERROR_INVALID_OPERATION;
1456        }
1457        if (loopCount == 0) {
1458            ;   // explicitly allowed as an exception to the loop region range check
1459        } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1460                startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1461            return ERROR_BAD_VALUE;
1462        }
1463        return native_set_loop(startInFrames, endInFrames, loopCount);
1464    }
1465
1466    /**
1467     * Sets the initialization state of the instance. This method was originally intended to be used
1468     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
1469     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
1470     * @param state the state of the AudioTrack instance
1471     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
1472     */
    @Deprecated
    protected void setState(int state) {
        // Directly overwrites the instance state field (e.g. STATE_INITIALIZED);
        // no validation is performed, which is why subclass use is discouraged.
        mState = state;
    }
1477
1478
1479    //---------------------------------------------------------
1480    // Transport control methods
1481    //--------------------
1482    /**
1483     * Starts playing an AudioTrack.
1484     * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
1485     * the {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
1486     * or {@link #write(float[], int, int, int)} methods.
1487     * If the mode is {@link #MODE_STREAM}, you can optionally prime the
1488     * output buffer by writing up to bufferSizeInBytes (from constructor) before starting.
1489     * This priming will avoid an immediate underrun, but is not required.
1490     *
1491     * @throws IllegalStateException
1492     */
1493    public void play()
1494    throws IllegalStateException {
1495        if (mState != STATE_INITIALIZED) {
1496            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
1497        }
1498        if (isRestricted()) {
1499            setVolume(0);
1500        }
1501        synchronized(mPlayStateLock) {
1502            native_start();
1503            mPlayState = PLAYSTATE_PLAYING;
1504        }
1505    }
1506
1507    private boolean isRestricted() {
1508        if ((mAttributes.getFlags() & AudioAttributes.FLAG_BYPASS_INTERRUPTION_POLICY) != 0) {
1509            return false;
1510        }
1511        try {
1512            final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType);
1513            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage,
1514                    Process.myUid(), ActivityThread.currentPackageName());
1515            return mode != AppOpsManager.MODE_ALLOWED;
1516        } catch (RemoteException e) {
1517            return false;
1518        }
1519    }
1520
1521    /**
1522     * Stops playing the audio data.
1523     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
1524     * after the last buffer that was written has been played. For an immediate stop, use
1525     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
1526     * back yet.
1527     * @throws IllegalStateException
1528     */
1529    public void stop()
1530    throws IllegalStateException {
1531        if (mState != STATE_INITIALIZED) {
1532            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
1533        }
1534
1535        // stop playing
1536        synchronized(mPlayStateLock) {
1537            native_stop();
1538            mPlayState = PLAYSTATE_STOPPED;
1539            mAvSyncHeader = null;
1540            mAvSyncBytesRemaining = 0;
1541        }
1542    }
1543
1544    /**
1545     * Pauses the playback of the audio data. Data that has not been played
1546     * back will not be discarded. Subsequent calls to {@link #play} will play
1547     * this data back. See {@link #flush()} to discard this data.
1548     *
1549     * @throws IllegalStateException
1550     */
1551    public void pause()
1552    throws IllegalStateException {
1553        if (mState != STATE_INITIALIZED) {
1554            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
1555        }
1556        //logd("pause()");
1557
1558        // pause playback
1559        synchronized(mPlayStateLock) {
1560            native_pause();
1561            mPlayState = PLAYSTATE_PAUSED;
1562        }
1563    }
1564
1565
1566    //---------------------------------------------------------
1567    // Audio data supply
1568    //--------------------
1569
1570    /**
1571     * Flushes the audio data currently queued for playback. Any data that has
1572     * been written but not yet presented will be discarded.  No-op if not stopped or paused,
1573     * or if the track's creation mode is not {@link #MODE_STREAM}.
1574     * <BR> Note that although data written but not yet presented is discarded, there is no
1575     * guarantee that all of the buffer space formerly used by that data
1576     * is available for a subsequent write.
1577     * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
1578     * less than or equal to the total buffer size
1579     * may return a short actual transfer count.
1580     */
1581    public void flush() {
1582        if (mState == STATE_INITIALIZED) {
1583            // flush the data in native layer
1584            native_flush();
1585            mAvSyncHeader = null;
1586            mAvSyncBytesRemaining = 0;
1587        }
1588
1589    }
1590
1591    /**
1592     * Writes the audio data to the audio sink for playback (streaming mode),
1593     * or copies audio data for later playback (static buffer mode).
1594     * The format specified in the AudioTrack constructor should be
1595     * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
1596     * In streaming mode, will block until all data has been written to the audio sink.
1597     * In static buffer mode, copies the data to the buffer starting at offset 0.
1598     * Note that the actual playback of this data might occur after this function
1599     * returns. This function is thread safe with respect to {@link #stop} calls,
1600     * in which case all of the specified data might not be written to the audio sink.
1601     *
1602     * @param audioData the array that holds the data to play.
1603     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
1604     *    starts.
1605     * @param sizeInBytes the number of bytes to read in audioData after the offset.
1606     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
1607     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1608     *    the parameters don't resolve to valid data and indexes, or
1609     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1610     *    needs to be recreated.
1611     */
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
        // Convenience overload: delegates to the four-argument variant with the
        // default blocking write mode.
        return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
    }
1615
1616    /**
1617     * Writes the audio data to the audio sink for playback (streaming mode),
1618     * or copies audio data for later playback (static buffer mode).
1619     * The format specified in the AudioTrack constructor should be
1620     * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
1621     * In streaming mode, will block until all data has been written to the audio sink.
1622     * In static buffer mode, copies the data to the buffer starting at offset 0.
1623     * Note that the actual playback of this data might occur after this function
1624     * returns. This function is thread safe with respect to {@link #stop} calls,
1625     * in which case all of the specified data might not be written to the audio sink.
1626     *
1627     * @param audioData the array that holds the data to play.
1628     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
1629     *    starts.
1630     * @param sizeInBytes the number of bytes to read in audioData after the offset.
1631     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1632     *     effect in static mode.
1633     *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1634     *         to the audio sink.
1635     *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1636     *     queuing as much audio data for playback as possible without blocking.
1637     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
1638     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1639     *    the parameters don't resolve to valid data and indexes, or
1640     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1641     *    needs to be recreated.
1642     */
1643    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
1644            @WriteMode int writeMode) {
1645
1646        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1647            return ERROR_INVALID_OPERATION;
1648        }
1649
1650        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1651            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1652            return ERROR_BAD_VALUE;
1653        }
1654
1655        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
1656                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
1657                || (offsetInBytes + sizeInBytes > audioData.length)) {
1658            return ERROR_BAD_VALUE;
1659        }
1660
1661        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
1662                writeMode == WRITE_BLOCKING);
1663
1664        if ((mDataLoadMode == MODE_STATIC)
1665                && (mState == STATE_NO_STATIC_DATA)
1666                && (ret > 0)) {
1667            // benign race with respect to other APIs that read mState
1668            mState = STATE_INITIALIZED;
1669        }
1670
1671        return ret;
1672    }
1673
1674    /**
1675     * Writes the audio data to the audio sink for playback (streaming mode),
1676     * or copies audio data for later playback (static buffer mode).
1677     * The format specified in the AudioTrack constructor should be
1678     * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
1679     * In streaming mode, will block until all data has been written to the audio sink.
1680     * In static buffer mode, copies the data to the buffer starting at offset 0.
1681     * Note that the actual playback of this data might occur after this function
1682     * returns. This function is thread safe with respect to {@link #stop} calls,
1683     * in which case all of the specified data might not be written to the audio sink.
1684     *
1685     * @param audioData the array that holds the data to play.
1686     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1687     *     starts.
1688     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1689     * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
1690     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1691     *    the parameters don't resolve to valid data and indexes, or
1692     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1693     *    needs to be recreated.
1694     */
    public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
        // Convenience overload: delegates to the four-argument variant with the
        // default blocking write mode.
        return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
    }
1698
1699    /**
1700     * Writes the audio data to the audio sink for playback (streaming mode),
1701     * or copies audio data for later playback (static buffer mode).
1702     * The format specified in the AudioTrack constructor should be
1703     * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
1704     * In streaming mode, will block until all data has been written to the audio sink.
1705     * In static buffer mode, copies the data to the buffer starting at offset 0.
1706     * Note that the actual playback of this data might occur after this function
1707     * returns. This function is thread safe with respect to {@link #stop} calls,
1708     * in which case all of the specified data might not be written to the audio sink.
1709     *
1710     * @param audioData the array that holds the data to play.
1711     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1712     *     starts.
1713     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1714     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1715     *     effect in static mode.
1716     *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1717     *         to the audio sink.
1718     *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1719     *     queuing as much audio data for playback as possible without blocking.
1720     * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
1721     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1722     *    the parameters don't resolve to valid data and indexes, or
1723     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1724     *    needs to be recreated.
1725     */
1726    public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
1727            @WriteMode int writeMode) {
1728
1729        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1730            return ERROR_INVALID_OPERATION;
1731        }
1732
1733        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1734            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1735            return ERROR_BAD_VALUE;
1736        }
1737
1738        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
1739                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
1740                || (offsetInShorts + sizeInShorts > audioData.length)) {
1741            return ERROR_BAD_VALUE;
1742        }
1743
1744        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
1745                writeMode == WRITE_BLOCKING);
1746
1747        if ((mDataLoadMode == MODE_STATIC)
1748                && (mState == STATE_NO_STATIC_DATA)
1749                && (ret > 0)) {
1750            // benign race with respect to other APIs that read mState
1751            mState = STATE_INITIALIZED;
1752        }
1753
1754        return ret;
1755    }
1756
1757    /**
1758     * Writes the audio data to the audio sink for playback (streaming mode),
1759     * or copies audio data for later playback (static buffer mode).
1760     * The format specified in the AudioTrack constructor should be
1761     * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
1762     * In static buffer mode, copies the data to the buffer starting at offset 0,
1763     * and the write mode is ignored.
1764     * In streaming mode, the blocking behavior will depend on the write mode.
1765     * <p>
1766     * Note that the actual playback of this data might occur after this function
1767     * returns. This function is thread safe with respect to {@link #stop} calls,
1768     * in which case all of the specified data might not be written to the audio sink.
1769     * <p>
1770     * @param audioData the array that holds the data to play.
1771     *     The implementation does not clip for sample values within the nominal range
1772     *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
1773     *     less than or equal to unity (1.0f), and in the absence of post-processing effects
1774     *     that could add energy, such as reverb.  For the convenience of applications
1775     *     that compute samples using filters with non-unity gain,
1776     *     sample values +3 dB beyond the nominal range are permitted.
1777     *     However such values may eventually be limited or clipped, depending on various gains
1778     *     and later processing in the audio path.  Therefore applications are encouraged
1779     *     to provide samples values within the nominal range.
1780     * @param offsetInFloats the offset, expressed as a number of floats,
1781     *     in audioData where the data to play starts.
1782     * @param sizeInFloats the number of floats to read in audioData after the offset.
1783     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1784     *     effect in static mode.
1785     *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1786     *         to the audio sink.
1787     *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1788     *     queuing as much audio data for playback as possible without blocking.
1789     * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION}
1790     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1791     *    the parameters don't resolve to valid data and indexes, or
1792     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1793     *    needs to be recreated.
1794     */
1795    public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
1796            @WriteMode int writeMode) {
1797
1798        if (mState == STATE_UNINITIALIZED) {
1799            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1800            return ERROR_INVALID_OPERATION;
1801        }
1802
1803        if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
1804            Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
1805            return ERROR_INVALID_OPERATION;
1806        }
1807
1808        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1809            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1810            return ERROR_BAD_VALUE;
1811        }
1812
1813        if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
1814                || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
1815                || (offsetInFloats + sizeInFloats > audioData.length)) {
1816            Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
1817            return ERROR_BAD_VALUE;
1818        }
1819
1820        int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
1821                writeMode == WRITE_BLOCKING);
1822
1823        if ((mDataLoadMode == MODE_STATIC)
1824                && (mState == STATE_NO_STATIC_DATA)
1825                && (ret > 0)) {
1826            // benign race with respect to other APIs that read mState
1827            mState = STATE_INITIALIZED;
1828        }
1829
1830        return ret;
1831    }
1832
1833
1834    /**
1835     * Writes the audio data to the audio sink for playback (streaming mode),
1836     * or copies audio data for later playback (static buffer mode).
1837     * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write
1838     * mode is ignored.
1839     * In streaming mode, the blocking behavior will depend on the write mode.
1840     * @param audioData the buffer that holds the data to play, starting at the position reported
1841     *     by <code>audioData.position()</code>.
1842     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
1843     *     have been advanced to reflect the amount of data that was successfully written to
1844     *     the AudioTrack.
1845     * @param sizeInBytes number of bytes to write.
1846     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
1847     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1848     *     effect in static mode.
1849     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1850     *         to the audio sink.
1851     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1852     *     queuing as much audio data for playback as possible without blocking.
1853     * @return 0 or a positive number of bytes that were written, or
1854     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
1855     *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1856     *     needs to be recreated.
1857     */
1858    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
1859            @WriteMode int writeMode) {
1860
1861        if (mState == STATE_UNINITIALIZED) {
1862            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1863            return ERROR_INVALID_OPERATION;
1864        }
1865
1866        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1867            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1868            return ERROR_BAD_VALUE;
1869        }
1870
1871        if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
1872            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
1873            return ERROR_BAD_VALUE;
1874        }
1875
1876        int ret = 0;
1877        if (audioData.isDirect()) {
1878            ret = native_write_native_bytes(audioData,
1879                    audioData.position(), sizeInBytes, mAudioFormat,
1880                    writeMode == WRITE_BLOCKING);
1881        } else {
1882            ret = native_write_byte(NioUtils.unsafeArray(audioData),
1883                    NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
1884                    sizeInBytes, mAudioFormat,
1885                    writeMode == WRITE_BLOCKING);
1886        }
1887
1888        if ((mDataLoadMode == MODE_STATIC)
1889                && (mState == STATE_NO_STATIC_DATA)
1890                && (ret > 0)) {
1891            // benign race with respect to other APIs that read mState
1892            mState = STATE_INITIALIZED;
1893        }
1894
1895        if (ret > 0) {
1896            audioData.position(audioData.position() + ret);
1897        }
1898
1899        return ret;
1900    }
1901
1902    /**
1903     * Writes the audio data to the audio sink for playback (streaming mode) on a HW_AV_SYNC track.
1904     * In streaming mode, the blocking behavior will depend on the write mode.
1905     * @param audioData the buffer that holds the data to play, starting at the position reported
1906     *     by <code>audioData.position()</code>.
1907     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
1908     *     have been advanced to reflect the amount of data that was successfully written to
1909     *     the AudioTrack.
1910     * @param sizeInBytes number of bytes to write.
1911     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
1912     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
1913     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1914     *         to the audio sink.
1915     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1916     *     queuing as much audio data for playback as possible without blocking.
1917     * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
1918     * @return 0 or a positive number of bytes that were written, or
1919     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
1920     *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1921     *     needs to be recreated.
1922     */
    public int write(ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        // Non-AV-sync tracks cannot consume the timestamp; fall through to the
        // plain ByteBuffer write (which also performs state/mode validation).
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists
        // Header layout (16 bytes, big-endian): magic 0x55550001, payload size, timestamp.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(16);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550001);
            mAvSyncHeader.putInt(sizeInBytes);
            mAvSyncHeader.putLong(timestamp);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // Header write failed: reset AV-sync state so the next call starts fresh.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Header only partially queued (possible in non-blocking mode);
                // report zero audio bytes written so the caller retries later.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more than the payload size declared in the current header.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;
        if (mAvSyncBytesRemaining == 0) {
            // Payload fully delivered; the next write starts a new header.
            mAvSyncHeader = null;
        }

        return ret;
    }
1980
1981
1982    /**
1983     * Sets the playback head position within the static buffer to zero,
1984     * that is it rewinds to start of static buffer.
1985     * The track must be stopped or paused, and
1986     * the track's creation mode must be {@link #MODE_STATIC}.
1987     * <p>
1988     * For API level 22 and above, also resets the value returned by
1989     * {@link #getPlaybackHeadPosition()} to zero.
1990     * For earlier API levels, the reset behavior is unspecified.
1991     * <p>
1992     * {@link #setPlaybackHeadPosition(int)} to zero
1993     * is recommended instead when the reset of {@link #getPlaybackHeadPosition} is not needed.
1994     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1995     *  {@link #ERROR_INVALID_OPERATION}
1996     */
1997    public int reloadStaticData() {
1998        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
1999            return ERROR_INVALID_OPERATION;
2000        }
2001        return native_reload_static();
2002    }
2003
2004    //--------------------------------------------------------------------------
2005    // Audio effects management
2006    //--------------------
2007
2008    /**
2009     * Attaches an auxiliary effect to the audio track. A typical auxiliary
2010     * effect is a reverberation effect which can be applied on any sound source
2011     * that directs a certain amount of its energy to this effect. This amount
2012     * is defined by setAuxEffectSendLevel().
2013     * {@see #setAuxEffectSendLevel(float)}.
2014     * <p>After creating an auxiliary effect (e.g.
2015     * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
2016     * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
2017     * this method to attach the audio track to the effect.
2018     * <p>To detach the effect from the audio track, call this method with a
2019     * null effect id.
2020     *
2021     * @param effectId system wide unique id of the effect to attach
2022     * @return error code or success, see {@link #SUCCESS},
2023     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
2024     */
2025    public int attachAuxEffect(int effectId) {
2026        if (mState == STATE_UNINITIALIZED) {
2027            return ERROR_INVALID_OPERATION;
2028        }
2029        return native_attachAuxEffect(effectId);
2030    }
2031
2032    /**
2033     * Sets the send level of the audio track to the attached auxiliary effect
2034     * {@link #attachAuxEffect(int)}.  Effect levels
2035     * are clamped to the closed interval [0.0, max] where
2036     * max is the value of {@link #getMaxVolume}.
2037     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
2038     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
2039     * this method must be called for the effect to be applied.
2040     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
2041     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
2042     * so an appropriate conversion from linear UI input x to level is:
2043     * x == 0 -&gt; level = 0
2044     * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
2045     *
2046     * @param level linear send level
2047     * @return error code or success, see {@link #SUCCESS},
2048     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
2049     */
2050    public int setAuxEffectSendLevel(float level) {
2051        if (isRestricted()) {
2052            return SUCCESS;
2053        }
2054        if (mState == STATE_UNINITIALIZED) {
2055            return ERROR_INVALID_OPERATION;
2056        }
2057        level = clampGainOrLevel(level);
2058        int err = native_setAuxEffectSendLevel(level);
2059        return err == 0 ? SUCCESS : ERROR;
2060    }
2061
2062    //--------------------------------------------------------------------------
2063    // Explicit Routing
2064    //--------------------
2065    private AudioDeviceInfo mPreferredDevice = null;
2066
2067    /**
2068     * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
2069     * the output from this AudioTrack.
2070     * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
2071     *  If deviceInfo is null, default routing is restored.
2072     * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
2073     * does not correspond to a valid audio output device.
2074     */
2075    public boolean setPreferredOutputDevice(AudioDeviceInfo deviceInfo) {
2076        // Do some validation....
2077        if (deviceInfo != null && !deviceInfo.isSink()) {
2078            return false;
2079        }
2080
2081        mPreferredDevice = deviceInfo;
2082        int preferredDeviceId = mPreferredDevice != null ? deviceInfo.getId() : 0;
2083
2084        return native_setOutputDevice(preferredDeviceId);
2085    }
2086
2087    /**
2088     * Returns the selected output specified by {@link #setPreferredOutputDevice}. Note that this
2089     * is not guaranteed to correspond to the actual device being used for playback.
2090     */
2091    public AudioDeviceInfo getPreferredOutputDevice() {
2092        return mPreferredDevice;
2093    }
2094
2095    //--------------------------------------------------------------------------
2096    // (Re)Routing Info
2097    //--------------------
    /**
     * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
     * <p>NOTE(review): the routing query is not implemented in this revision — this method
     * always returns {@code null}; callers must tolerate a null result.
     */
    public AudioDeviceInfo getRoutedDevice() {
        return null;
    }
2104
    /**
     * Listener-to-delegate map used to dispatch routing-change notifications to apps
     * that registered via addOnAudioTrackRoutingListener(), optionally on a
     * caller-supplied {@link Handler}.  Access is guarded by synchronizing on the
     * map object itself.
     */
    private ArrayMap<OnAudioTrackRoutingListener, NativeRoutingEventHandlerDelegate>
        mRoutingChangeListeners =
            new ArrayMap<OnAudioTrackRoutingListener, NativeRoutingEventHandlerDelegate>();
2112
2113    /**
2114     * Adds an {@link OnAudioTrackRoutingListener} to receive notifications of routing changes
2115     * on this AudioTrack.
2116     */
2117    public void addOnAudioTrackRoutingListener(OnAudioTrackRoutingListener listener,
2118            android.os.Handler handler) {
2119        if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
2120            synchronized (mRoutingChangeListeners) {
2121                mRoutingChangeListeners.put(
2122                    listener, new NativeRoutingEventHandlerDelegate(this, listener, handler));
2123            }
2124        }
2125    }
2126
2127    /**
2128     * Removes an {@link OnAudioTrackRoutingListener} which has been previously added
2129     * to receive notifications of changes to the set of connected audio devices.
2130     */
2131    public void removeOnAudioTrackRoutingListener(OnAudioTrackRoutingListener listener) {
2132        synchronized (mRoutingChangeListeners) {
2133            if (mRoutingChangeListeners.containsKey(listener)) {
2134                mRoutingChangeListeners.remove(listener);
2135            }
2136        }
2137    }
2138
2139    /**
2140     * Sends device list change notification to all listeners.
2141     */
2142    private void broadcastRoutingChange() {
2143        Collection<NativeRoutingEventHandlerDelegate> values;
2144        synchronized (mRoutingChangeListeners) {
2145            values = mRoutingChangeListeners.values();
2146        }
2147        for(NativeRoutingEventHandlerDelegate delegate : values) {
2148            Handler handler = delegate.getHandler();
2149            if (handler != null) {
2150                handler.sendEmptyMessage(NATIVE_EVENT_ROUTING_CHANGE);
2151            }
2152        }
2153    }
2154
2155    //---------------------------------------------------------
2156    // Interface definitions
2157    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     * Register an implementation with setPlaybackPositionUpdateListener().
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         * @param track the AudioTrack whose marker was reached
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         * @param track the AudioTrack that triggered the notification
         */
        void onPeriodicNotification(AudioTrack track);
    }
2175
2176    //---------------------------------------------------------
2177    // Inner classes
2178    //--------------------
2179    /**
2180     * Helper class to handle the forwarding of native events to the appropriate listener
2181     * (potentially) handled in a different thread
2182     */
2183    private class NativePositionEventHandlerDelegate {
2184        private final Handler mHandler;
2185
2186        NativePositionEventHandlerDelegate(final AudioTrack track,
2187                                   final OnPlaybackPositionUpdateListener listener,
2188                                   Handler handler) {
2189            // find the looper for our new event handler
2190            Looper looper;
2191            if (handler != null) {
2192                looper = handler.getLooper();
2193            } else {
2194                // no given handler, use the looper the AudioTrack was created in
2195                looper = mInitializationLooper;
2196            }
2197
2198            // construct the event handler with this looper
2199            if (looper != null) {
2200                // implement the event handler delegate
2201                mHandler = new Handler(looper) {
2202                    @Override
2203                    public void handleMessage(Message msg) {
2204                        if (track == null) {
2205                            return;
2206                        }
2207                        switch(msg.what) {
2208                        case NATIVE_EVENT_MARKER:
2209                            if (listener != null) {
2210                                listener.onMarkerReached(track);
2211                            }
2212                            break;
2213                        case NATIVE_EVENT_NEW_POS:
2214                            if (listener != null) {
2215                                listener.onPeriodicNotification(track);
2216                            }
2217                            break;
2218                        default:
2219                            loge("Unknown native event type: " + msg.what);
2220                            break;
2221                        }
2222                    }
2223                };
2224            } else {
2225                mHandler = null;
2226            }
2227        }
2228
2229        Handler getHandler() {
2230            return mHandler;
2231        }
2232    }
2233
2234    /**
2235     * Helper class to handle the forwarding of native events to the appropriate listener
2236     * (potentially) handled in a different thread
2237     */
2238    private class NativeRoutingEventHandlerDelegate {
2239        private final Handler mHandler;
2240
2241        NativeRoutingEventHandlerDelegate(final AudioTrack track,
2242                                   final OnAudioTrackRoutingListener listener,
2243                                   Handler handler) {
2244            // find the looper for our new event handler
2245            Looper looper;
2246            if (handler != null) {
2247                looper = handler.getLooper();
2248            } else {
2249                // no given handler, use the looper the AudioTrack was created in
2250                looper = mInitializationLooper;
2251            }
2252
2253            // construct the event handler with this looper
2254            if (looper != null) {
2255                // implement the event handler delegate
2256                mHandler = new Handler(looper) {
2257                    @Override
2258                    public void handleMessage(Message msg) {
2259                        if (track == null) {
2260                            return;
2261                        }
2262                        switch(msg.what) {
2263                        case NATIVE_EVENT_ROUTING_CHANGE:
2264                            if (listener != null) {
2265                                listener.onAudioTrackRouting(track);
2266                            }
2267                            break;
2268                        default:
2269                            loge("Unknown native event type: " + msg.what);
2270                            break;
2271                        }
2272                    }
2273                };
2274            } else {
2275                mHandler = null;
2276            }
2277        }
2278
2279        Handler getHandler() {
2280            return mHandler;
2281        }
2282    }
2283
2284    //---------------------------------------------------------
2285    // Java methods called from the native side
2286    //--------------------
2287    @SuppressWarnings("unused")
2288    private static void postEventFromNative(Object audiotrack_ref,
2289            int what, int arg1, int arg2, Object obj) {
2290        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
2291        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
2292        if (track == null) {
2293            return;
2294        }
2295
2296        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
2297        if (delegate != null) {
2298            Handler handler = delegate.getHandler();
2299            if (handler != null) {
2300                Message m = handler.obtainMessage(what, arg1, arg2, obj);
2301                handler.sendMessage(m);
2302            }
2303        }
2304
2305    }
2306
2307
2308    //---------------------------------------------------------
2309    // Native methods called from the Java side
2310    //--------------------
2311
    // Creates the native AudioTrack peer; writes the allocated session id into sessionId[0].
    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    private native final void native_finalize();

    private native final void native_release();

    // Transport control: start, stop, pause playback; flush discards queued data.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Writes PCM data from a Java array in the given format; returns the amount
    // written or a negative error code.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    // Writes from a direct ByteBuffer starting at positionInBytes.
    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    // Rewinds the playback head within the static buffer (see reloadStaticData()).
    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    // floatArray must be a non-null array of length >= 2
    // [0] is speed
    // [1] is pitch
    // intArray must be a non-null array of length >= 2
    // [0] is audio fallback mode
    // [1] is audio stretch mode
    private native final void native_set_playback_settings(float[] floatArray, int[] intArray);
    private native final void native_get_playback_settings(float[] floatArray, int[] intArray);

    // Notification marker and update-period accessors (frame units).
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    // Playback head position accessors (frame units).
    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary-effect attachment and send level (see attachAuxEffect()).
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Explicit routing; a deviceId of 0 restores default routing.
    private native final boolean native_setOutputDevice(int deviceId);
2391
2392    //---------------------------------------------------------
2393    // Utility methods
2394    //------------------
2395
    /** Logs a debug-level message under the AudioTrack tag. */
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
2399
    /** Logs an error-level message under the AudioTrack tag. */
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
2403}
2404