AudioTrack.java revision eeecfa44010321952a2f156bb883ca8a1331a451
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.media;
18
19import java.lang.annotation.Retention;
20import java.lang.annotation.RetentionPolicy;
21import java.lang.ref.WeakReference;
22import java.lang.Math;
23import java.nio.ByteBuffer;
24import java.nio.ByteOrder;
25import java.nio.NioUtils;
26import java.util.Collection;
27
28import android.annotation.IntDef;
29import android.annotation.NonNull;
30import android.app.ActivityThread;
31import android.app.AppOpsManager;
32import android.content.Context;
33import android.os.Handler;
34import android.os.IBinder;
35import android.os.Looper;
36import android.os.Message;
37import android.os.Process;
38import android.os.RemoteException;
39import android.os.ServiceManager;
40import android.util.ArrayMap;
41import android.util.Log;
42
43import com.android.internal.app.IAppOpsService;
44
45/**
46 * The AudioTrack class manages and plays a single audio resource for Java applications.
47 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
48 * achieved by "pushing" the data to the AudioTrack object using one of the
49 *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
50 *  and {@link #write(float[], int, int, int)} methods.
51 *
52 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
53 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
54 * one of the {@code write()} methods. These are blocking and return when the data has been
55 * transferred from the Java layer to the native layer and queued for playback. The streaming
56 * mode is most useful when playing blocks of audio data that for instance are:
57 *
58 * <ul>
59 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
60 *   <li>too big to fit in memory because of the characteristics of the audio data
61 *         (high sampling rate, bits per sample ...)</li>
62 *   <li>received or generated while previously queued audio is playing.</li>
63 * </ul>
64 *
65 * The static mode should be chosen when dealing with short sounds that fit in memory and
66 * that need to be played with the smallest latency possible. The static mode will
67 * therefore be preferred for UI and game sounds that are played often, and with the
68 * smallest overhead possible.
69 *
70 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
71 * The size of this buffer, specified during the construction, determines how long an AudioTrack
72 * can play before running out of data.<br>
73 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
74 * be played from it.<br>
75 * For the streaming mode, data will be written to the audio sink in chunks of
76 * sizes less than or equal to the total buffer size.
77 *
78 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
79 */
80public class AudioTrack implements AudioRouting
81{
82    //---------------------------------------------------------
83    // Constants
84    //--------------------
85    /** Minimum value for a linear gain or auxiliary effect level.
86     *  This value must be exactly equal to 0.0f; do not change it.
87     */
88    private static final float GAIN_MIN = 0.0f;
89    /** Maximum value for a linear gain or auxiliary effect level.
90     *  This value must be greater than or equal to 1.0f.
91     */
92    private static final float GAIN_MAX = 1.0f;
93
94    /** Maximum value for AudioTrack channel count
95     * @hide public for MediaCode only, do not un-hide or change to a numeric literal
96     */
97    public static final int CHANNEL_COUNT_MAX = native_get_FCC_8();
98
99    /** indicates AudioTrack state is stopped */
100    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
101    /** indicates AudioTrack state is paused */
102    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
103    /** indicates AudioTrack state is playing */
104    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
105
106    // keep these values in sync with android_media_AudioTrack.cpp
107    /**
108     * Creation mode where audio data is transferred from Java to the native layer
109     * only once before the audio starts playing.
110     */
111    public static final int MODE_STATIC = 0;
112    /**
113     * Creation mode where audio data is streamed from Java to the native layer
114     * as the audio is playing.
115     */
116    public static final int MODE_STREAM = 1;
117
118    /** @hide */
119    @IntDef({
120        MODE_STATIC,
121        MODE_STREAM
122    })
123    @Retention(RetentionPolicy.SOURCE)
124    public @interface TransferMode {}
125
126    /**
127     * State of an AudioTrack that was not successfully initialized upon creation.
128     */
129    public static final int STATE_UNINITIALIZED = 0;
130    /**
131     * State of an AudioTrack that is ready to be used.
132     */
133    public static final int STATE_INITIALIZED   = 1;
134    /**
135     * State of a successfully initialized AudioTrack that uses static data,
136     * but that hasn't received that data yet.
137     */
138    public static final int STATE_NO_STATIC_DATA = 2;
139
140    /**
141     * Denotes a successful operation.
142     */
143    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
144    /**
145     * Denotes a generic operation failure.
146     */
147    public  static final int ERROR                                 = AudioSystem.ERROR;
148    /**
149     * Denotes a failure due to the use of an invalid value.
150     */
151    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
152    /**
153     * Denotes a failure due to the improper use of a method.
154     */
155    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
156    /**
157     * An error code indicating that the object reporting it is no longer valid and needs to
158     * be recreated.
159     * @hide
160     */
161    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
162    /**
163     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
164     * or immediately after start/ACTIVE.
165     * @hide
166     */
167    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;
168
169    // Error codes:
170    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
171    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
172    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
173    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
174    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
175    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;
176
177    // Events:
178    // to keep in sync with frameworks/av/include/media/AudioTrack.h
179    /**
180     * Event id denotes when playback head has reached a previously set marker.
181     */
182    private static final int NATIVE_EVENT_MARKER  = 3;
183    /**
184     * Event id denotes when previously set update period has elapsed during playback.
185     */
186    private static final int NATIVE_EVENT_NEW_POS = 4;
187
188    private final static String TAG = "android.media.AudioTrack";
189
190
191    /** @hide */
192    @IntDef({
193        WRITE_BLOCKING,
194        WRITE_NON_BLOCKING
195    })
196    @Retention(RetentionPolicy.SOURCE)
197    public @interface WriteMode {}
198
199    /**
200     * The write mode indicating the write operation will block until all data has been written,
201     * to be used as the actual value of the writeMode parameter in
202     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
203     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
204     * {@link #write(ByteBuffer, int, int, long)}.
205     */
206    public final static int WRITE_BLOCKING = 0;
207
208    /**
209     * The write mode indicating the write operation will return immediately after
210     * queuing as much audio data for playback as possible without blocking,
211     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
213     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
214     * {@link #write(ByteBuffer, int, int, long)}.
215     */
216    public final static int WRITE_NON_BLOCKING = 1;
217
218    //--------------------------------------------------------------------------
219    // Member variables
220    //--------------------
221    /**
222     * Indicates the state of the AudioTrack instance.
223     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
224     */
225    private int mState = STATE_UNINITIALIZED;
226    /**
227     * Indicates the play state of the AudioTrack instance.
228     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
229     */
230    private int mPlayState = PLAYSTATE_STOPPED;
231    /**
232     * Lock to ensure mPlayState updates reflect the actual state of the object.
233     */
234    private final Object mPlayStateLock = new Object();
235    /**
236     * Sizes of the native audio buffer.
237     * These values are set during construction and can be stale.
238     * To obtain the current native audio buffer frame count use {@link #getBufferSizeInFrames()}.
239     */
240    private int mNativeBufferSizeInBytes = 0;
241    private int mNativeBufferSizeInFrames = 0;
242    /**
243     * Handler for events coming from the native code.
244     */
245    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
246    /**
247     * Looper associated with the thread that creates the AudioTrack instance.
248     */
249    private final Looper mInitializationLooper;
250    /**
251     * The audio data source sampling rate in Hz.
252     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
253     */
254    private int mSampleRate; // initialized by all constructors via audioParamCheck()
255    /**
256     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
257     */
258    private int mChannelCount = 1;
259    /**
260     * The audio channel mask used for calling native AudioTrack
261     */
262    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
263
264    /**
265     * The type of the audio stream to play. See
266     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
267     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
268     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
269     *   {@link AudioManager#STREAM_DTMF}.
270     */
271    private int mStreamType = AudioManager.STREAM_MUSIC;
272
273    private final AudioAttributes mAttributes;
274    /**
275     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
276     */
277    private int mDataLoadMode = MODE_STREAM;
278    /**
279     * The current channel position mask, as specified on AudioTrack creation.
280     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
281     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
282     */
283    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
284    /**
285     * The channel index mask if specified, otherwise 0.
286     */
287    private int mChannelIndexMask = 0;
288    /**
289     * The encoding of the audio samples.
290     * @see AudioFormat#ENCODING_PCM_8BIT
291     * @see AudioFormat#ENCODING_PCM_16BIT
292     * @see AudioFormat#ENCODING_PCM_FLOAT
293     */
294    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
295    /**
296     * Audio session ID
297     */
298    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
299    /**
300     * Reference to the app-ops service.
301     */
302    private final IAppOpsService mAppOps;
303    /**
304     * HW_AV_SYNC track AV Sync Header
305     */
306    private ByteBuffer mAvSyncHeader = null;
307    /**
308     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
309     */
310    private int mAvSyncBytesRemaining = 0;
311
312    //--------------------------------
313    // Used exclusively by native code
314    //--------------------
315    /**
316     * @hide
317     * Accessed by native methods: provides access to C++ AudioTrack object.
318     */
319    @SuppressWarnings("unused")
320    protected long mNativeTrackInJavaObj;
321    /**
322     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
323     * the native AudioTrack object, but not stored in it).
324     */
325    @SuppressWarnings("unused")
326    private long mJniData;
327
328
329    //--------------------------------------------------------------------------
330    // Constructor, Finalize
331    //--------------------
332    /**
333     * Class constructor.
334     * @param streamType the type of the audio stream. See
335     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
336     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
337     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
338     * @param sampleRateInHz the initial source sample rate expressed in Hz.
339     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
340     *   which is usually the sample rate of the sink.
341     *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
342     * @param channelConfig describes the configuration of the audio channels.
343     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
344     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
345     * @param audioFormat the format in which the audio data is represented.
346     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
347     *   {@link AudioFormat#ENCODING_PCM_8BIT},
348     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
349     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
350     *   read from for playback. This should be a multiple of the frame size in bytes.
351     *   <p> If the track's creation mode is {@link #MODE_STATIC},
352     *   this is the maximum length sample, or audio clip, that can be played by this instance.
353     *   <p> If the track's creation mode is {@link #MODE_STREAM},
354     *   this should be the desired buffer size
355     *   for the <code>AudioTrack</code> to satisfy the application's
356     *   natural latency requirements.
357     *   If <code>bufferSizeInBytes</code> is less than the
358     *   minimum buffer size for the output sink, it is automatically increased to the minimum
359     *   buffer size.
360     *   The method {@link #getBufferSizeInFrames()} returns the
361     *   actual size in frames of the native buffer created, which
362     *   determines the frequency to write
363     *   to the streaming <code>AudioTrack</code> to avoid underrun.
364     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
365     * @throws java.lang.IllegalArgumentException
366     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegate to the session-aware constructor, asking the system to
        // generate a fresh audio session ID for this track.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
    }
373
374    /**
375     * Class constructor with audio session. Use this constructor when the AudioTrack must be
376     * attached to a particular audio session. The primary use of the audio session ID is to
377     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
378     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
379     * and media players in the same session and not to the output mix.
380     * When an AudioTrack is created without specifying a session, it will create its own session
381     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
382     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
383     * session
384     * with all other media players or audio tracks in the same session, otherwise a new session
385     * will be created for this track if none is supplied.
386     * @param streamType the type of the audio stream. See
387     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
388     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
389     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
390     * @param sampleRateInHz the initial source sample rate expressed in Hz.
391     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
392     *   which is usually the sample rate of the sink.
393     * @param channelConfig describes the configuration of the audio channels.
394     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
395     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
396     * @param audioFormat the format in which the audio data is represented.
397     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
398     *   {@link AudioFormat#ENCODING_PCM_8BIT},
399     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
400     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
401     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
402     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
403     *   this is the maximum size of the sound that will be played for this instance.
404     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
405     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
406     *   smaller than getMinBufferSize() will result in an initialization failure.
407     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
408     * @param sessionId Id of audio session the AudioTrack must be attached to
409     * @throws java.lang.IllegalArgumentException
410     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Adapt the legacy stream-type API onto the AudioAttributes/AudioFormat
        // constructor: the stream type is wrapped in an AudioAttributes instance,
        // and the rate/channel/encoding triple in an AudioFormat instance.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
    }
426
427    /**
428     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
429     * @param attributes a non-null {@link AudioAttributes} instance.
430     * @param format a non-null {@link AudioFormat} instance describing the format of the data
431     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
432     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
433     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
434     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
435     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
436     *   this is the maximum size of the sound that will be played for this instance.
437     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
438     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
439     *   smaller than getMinBufferSize() will result in an initialization failure.
440     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
441     * @param sessionId ID of audio session the AudioTrack must be attached to, or
442     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
443     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
444     *   construction.
445     * @throws IllegalArgumentException
446     */
447    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
448            int mode, int sessionId)
449                    throws IllegalArgumentException {
450        // mState already == STATE_UNINITIALIZED
451
452        if (attributes == null) {
453            throw new IllegalArgumentException("Illegal null AudioAttributes");
454        }
455        if (format == null) {
456            throw new IllegalArgumentException("Illegal null AudioFormat");
457        }
458
459        // remember which looper is associated with the AudioTrack instantiation
460        Looper looper;
461        if ((looper = Looper.myLooper()) == null) {
462            looper = Looper.getMainLooper();
463        }
464
465        int rate = format.getSampleRate();
466        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
467            rate = 0;
468        }
469
470        int channelIndexMask = 0;
471        if ((format.getPropertySetMask()
472                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
473            channelIndexMask = format.getChannelIndexMask();
474        }
475        int channelMask = 0;
476        if ((format.getPropertySetMask()
477                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
478            channelMask = format.getChannelMask();
479        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
480            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
481                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
482        }
483        int encoding = AudioFormat.ENCODING_DEFAULT;
484        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
485            encoding = format.getEncoding();
486        }
487        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
488        mStreamType = AudioSystem.STREAM_DEFAULT;
489
490        audioBuffSizeCheck(bufferSizeInBytes);
491
492        mInitializationLooper = looper;
493        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
494        mAppOps = IAppOpsService.Stub.asInterface(b);
495
496        mAttributes = new AudioAttributes.Builder(attributes).build();
497
498        if (sessionId < 0) {
499            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
500        }
501
502        int[] sampleRate = new int[] {mSampleRate};
503        int[] session = new int[1];
504        session[0] = sessionId;
505        // native initialization
506        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
507                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
508                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
509        if (initResult != SUCCESS) {
510            loge("Error code "+initResult+" when initializing AudioTrack.");
511            return; // with mState == STATE_UNINITIALIZED
512        }
513
514        mSampleRate = sampleRate[0];
515        mSessionId = session[0];
516
517        if (mDataLoadMode == MODE_STATIC) {
518            mState = STATE_NO_STATIC_DATA;
519        } else {
520            mState = STATE_INITIALIZED;
521        }
522    }
523
524    /**
525     * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
526     * the AudioTrackRoutingProxy subclass.
527     * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
528     * (associated with an OpenSL ES player).
529     * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
530     * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
531     * it means that the OpenSL player interface hasn't been realized, so there is no native
532     * Audiotrack to connect to. In this case wait to call deferred_connect() until the
533     * OpenSLES interface is realized.
534     */
535    /*package*/ AudioTrack(long nativeTrackInJavaObj) {
536        // "final"s
537        mAttributes = null;
538        mAppOps = null;
539        mNativeTrackInJavaObj = 0;
540        mJniData = 0;
541
542        // remember which looper is associated with the AudioTrack instantiation
543        Looper looper;
544        if ((looper = Looper.myLooper()) == null) {
545            looper = Looper.getMainLooper();
546        }
547        mInitializationLooper = looper;
548
549        // other initialization...
550        if (nativeTrackInJavaObj != 0) {
551            deferred_connect(nativeTrackInJavaObj);
552        } else {
553            mState = STATE_UNINITIALIZED;
554        }
555    }
556
557    /**
558     * @hide
559     */
560    /* package */ void deferred_connect(long nativeTrackInJavaObj) {
561        if (mState != STATE_INITIALIZED) {
562            // Note that for this native_setup, we are providing an already created/initialized
563            // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
564            int[] session = { 0 };
565            int[] rates = { 0 };
566            int initResult = native_setup(new WeakReference<AudioTrack>(this),
567                    null /*mAttributes - NA*/,
568                    rates /*sampleRate - NA*/,
569                    0 /*mChannelMask - NA*/,
570                    0 /*mChannelIndexMask - NA*/,
571                    0 /*mAudioFormat - NA*/,
572                    0 /*mNativeBufferSizeInBytes - NA*/,
573                    0 /*mDataLoadMode - NA*/,
574                    session,
575                    nativeTrackInJavaObj);
576            if (initResult != SUCCESS) {
577                loge("Error code "+initResult+" when initializing AudioTrack.");
578                return; // with mState == STATE_UNINITIALIZED
579            }
580
581            mSessionId = session[0];
582
583            mState = STATE_INITIALIZED;
584        }
585    }
586
587    /**
588     * Builder class for {@link AudioTrack} objects.
589     * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
590     * attributes and audio format parameters, you indicate which of those vary from the default
591     * behavior on the device.
592     * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
593     * parameters, to be used by a new <code>AudioTrack</code> instance:
594     *
595     * <pre class="prettyprint">
596     * AudioTrack player = new AudioTrack.Builder()
597     *         .setAudioAttributes(new AudioAttributes.Builder()
598     *                  .setUsage(AudioAttributes.USAGE_ALARM)
     *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
600     *                  .build())
601     *         .setAudioFormat(new AudioFormat.Builder()
602     *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     *                 .setSampleRate(44100)
604     *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
605     *                 .build())
606     *         .setBufferSize(minBuffSize)
607     *         .build();
608     * </pre>
609     * <p>
610     * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
611     * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
612     * <br>If the audio format is not specified or is incomplete, its sample rate will be the
613     * default output sample rate of the device (see
614     * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be
615     * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
616     * {@link AudioFormat#ENCODING_PCM_16BIT}.
617     * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
618     * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
619     * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
620     * <code>MODE_STREAM</code> will be used.
621     * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
622     * be generated.
623     */
624    public static class Builder {
625        private AudioAttributes mAttributes;
626        private AudioFormat mFormat;
627        private int mBufferSizeInBytes;
628        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
629        private int mMode = MODE_STREAM;
630
631        /**
632         * Constructs a new Builder with the default values as described above.
633         */
        public Builder() {
            // All configuration is optional; the fields keep the defaults
            // documented on the class until overridden by the setters.
        }
636
637        /**
638         * Sets the {@link AudioAttributes}.
639         * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
640         *     data to be played.
641         * @return the same Builder instance.
642         * @throws IllegalArgumentException
643         */
644        public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
645                throws IllegalArgumentException {
646            if (attributes == null) {
647                throw new IllegalArgumentException("Illegal null AudioAttributes argument");
648            }
649            // keep reference, we only copy the data when building
650            mAttributes = attributes;
651            return this;
652        }
653
654        /**
655         * Sets the format of the audio data to be played by the {@link AudioTrack}.
656         * See {@link AudioFormat.Builder} for configuring the audio format parameters such
657         * as encoding, channel mask and sample rate.
658         * @param format a non-null {@link AudioFormat} instance.
659         * @return the same Builder instance.
660         * @throws IllegalArgumentException
661         */
662        public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
663                throws IllegalArgumentException {
664            if (format == null) {
665                throw new IllegalArgumentException("Illegal null AudioFormat argument");
666            }
667            // keep reference, we only copy the data when building
668            mFormat = format;
669            return this;
670        }
671
672        /**
673         * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
674         * If using the {@link AudioTrack} in streaming mode
675         * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
676         * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
677         * the minimum required buffer size for the successful creation of an AudioTrack instance
678         * in streaming mode. Using values smaller than <code>getMinBufferSize()</code> will result
679         * in an exception when trying to build the <code>AudioTrack</code>.
680         * <br>If using the <code>AudioTrack</code> in static mode (see
681         * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
682         * played by this instance.
683         * @param bufferSizeInBytes
684         * @return the same Builder instance.
685         * @throws IllegalArgumentException
686         */
687        public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
688                throws IllegalArgumentException {
689            if (bufferSizeInBytes <= 0) {
690                throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
691            }
692            mBufferSizeInBytes = bufferSizeInBytes;
693            return this;
694        }
695
696        /**
697         * Sets the mode under which buffers of audio data are transferred from the
698         * {@link AudioTrack} to the framework.
699         * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
700         * @return the same Builder instance.
701         * @throws IllegalArgumentException
702         */
703        public @NonNull Builder setTransferMode(@TransferMode int mode)
704                throws IllegalArgumentException {
705            switch(mode) {
706                case MODE_STREAM:
707                case MODE_STATIC:
708                    mMode = mode;
709                    break;
710                default:
711                    throw new IllegalArgumentException("Invalid transfer mode " + mode);
712            }
713            return this;
714        }
715
716        /**
717         * Sets the session ID the {@link AudioTrack} will be attached to.
718         * @param sessionId a strictly positive ID number retrieved from another
719         *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
720         *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
721         *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
722         * @return the same Builder instance.
723         * @throws IllegalArgumentException
724         */
725        public @NonNull Builder setSessionId(int sessionId)
726                throws IllegalArgumentException {
727            if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
728                throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
729            }
730            mSessionId = sessionId;
731            return this;
732        }
733
734        /**
735         * Builds an {@link AudioTrack} instance initialized with all the parameters set
736         * on this <code>Builder</code>.
737         * @return a new successfully initialized {@link AudioTrack} instance.
738         * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
739         *     were incompatible, or if they are not supported by the device,
740         *     or if the device was not available.
741         */
742        public @NonNull AudioTrack build() throws UnsupportedOperationException {
743            if (mAttributes == null) {
744                mAttributes = new AudioAttributes.Builder()
745                        .setUsage(AudioAttributes.USAGE_MEDIA)
746                        .build();
747            }
748            if (mFormat == null) {
749                mFormat = new AudioFormat.Builder()
750                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
751                        //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
752                        .setEncoding(AudioFormat.ENCODING_DEFAULT)
753                        .build();
754            }
755            try {
756                // If the buffer size is not specified in streaming mode,
757                // use a single frame for the buffer size and let the
758                // native code figure out the minimum buffer size.
759                if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
760                    mBufferSizeInBytes = mFormat.getChannelCount()
761                            * mFormat.getBytesPerSample(mFormat.getEncoding());
762                }
763                final AudioTrack track = new AudioTrack(
764                        mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId);
765                if (track.getState() == STATE_UNINITIALIZED) {
766                    // release is not necessary
767                    throw new UnsupportedOperationException("Cannot create AudioTrack");
768                }
769                return track;
770            } catch (IllegalArgumentException e) {
771                throw new UnsupportedOperationException(e.getMessage());
772            }
773        }
774    }
775
    // Mask of all the positional output channels supported by AudioTrack. Allowed
    // combinations are further restricted by the matching left/right pairing rule and
    // CHANNEL_COUNT_MAX -- see isMultichannelConfigSupported().
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
788
    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // (SAMPLE_RATE_UNSPECIFIED is explicitly allowed and skips the range check)
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 is based on stereo. We could coerce it to stereo.
        // But the application needs to know the stream is stereo so that
        // it is encoded and played correctly. So better to just reject it.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO) {
            throw new IllegalArgumentException(
                    "ENCODING_IEC61937 must be configured as CHANNEL_OUT_STEREO");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            // CHANNEL_INVALID plus a non-zero index mask means "index configuration only":
            // leave mChannelCount at 0 so the index-mask branch below fills it in.
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // both a position mask and an index mask were given: they must describe
                // the same number of channels
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // After the first clause rules out unknown modes, (mode != MODE_STREAM) in the
        // second clause is equivalent to (mode == MODE_STATIC): static mode additionally
        // requires a linear PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
882
883    /**
884     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
885     * @param channelConfig the mask to validate
886     * @return false if the AudioTrack can't be used with such a mask
887     */
888    private static boolean isMultichannelConfigSupported(int channelConfig) {
889        // check for unsupported channels
890        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
891            loge("Channel configuration features unsupported channels");
892            return false;
893        }
894        final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
895        if (channelCount > CHANNEL_COUNT_MAX) {
896            loge("Channel configuration contains too many channels " +
897                    channelCount + ">" + CHANNEL_COUNT_MAX);
898            return false;
899        }
900        // check for unsupported multichannel combinations:
901        // - FL/FR must be present
902        // - L/R channels must be paired (e.g. no single L channel)
903        final int frontPair =
904                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
905        if ((channelConfig & frontPair) != frontPair) {
906                loge("Front channels must be present in multichannel configurations");
907                return false;
908        }
909        final int backPair =
910                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
911        if ((channelConfig & backPair) != 0) {
912            if ((channelConfig & backPair) != backPair) {
913                loge("Rear channels can't be used independently");
914                return false;
915            }
916        }
917        final int sidePair =
918                AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
919        if ((channelConfig & sidePair) != 0
920                && (channelConfig & sidePair) != sidePair) {
921            loge("Side channels can't be used independently");
922            return false;
923        }
924        return true;
925    }
926
927
928    // Convenience method for the constructor's audio buffer size check.
929    // preconditions:
930    //    mChannelCount is valid
931    //    mAudioFormat is valid
932    // postcondition:
933    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
934    private void audioBuffSizeCheck(int audioBufferSize) {
935        // NB: this section is only valid with PCM or IEC61937 data.
936        //     To update when supporting compressed formats
937        int frameSizeInBytes;
938        if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
939            frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
940        } else {
941            frameSizeInBytes = 1;
942        }
943        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
944            throw new IllegalArgumentException("Invalid audio buffer size.");
945        }
946
947        mNativeBufferSizeInBytes = audioBufferSize;
948        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
949    }
950
951
952    /**
953     * Releases the native AudioTrack resources.
954     */
955    public void release() {
956        // even though native_release() stops the native AudioTrack, we need to stop
957        // AudioTrack subclasses too.
958        try {
959            stop();
960        } catch(IllegalStateException ise) {
961            // don't raise an exception, we're releasing the resources.
962        }
963        native_release();
964        mState = STATE_UNINITIALIZED;
965    }
966
    @Override
    protected void finalize() {
        // NOTE(review): super.finalize() is not chained here; confirm this is intentional
        // before any subclass relies on its own finalizer running via super calls.
        native_finalize();
    }
971
972    //--------------------------------------------------------------------------
973    // Getters
974    //--------------------
975    /**
976     * Returns the minimum gain value, which is the constant 0.0.
977     * Gain values less than 0.0 will be clamped to 0.0.
978     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
979     * @return the minimum value, which is the constant 0.0.
980     */
981    static public float getMinVolume() {
982        return GAIN_MIN;
983    }
984
985    /**
986     * Returns the maximum gain value, which is greater than or equal to 1.0.
987     * Gain values greater than the maximum will be clamped to the maximum.
988     * <p>The word "volume" in the API name is historical; this is actually a gain.
989     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
990     * corresponds to a gain of 0 dB (sample values left unmodified).
991     * @return the maximum value, which is greater than or equal to 1.0.
992     */
993    static public float getMaxVolume() {
994        return GAIN_MAX;
995    }
996
997    /**
998     * Returns the configured audio source sample rate in Hz.
999     * The initial source sample rate depends on the constructor parameters,
1000     * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
1001     * If the constructor had a specific sample rate, then the initial sink sample rate is that
1002     * value.
1003     * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
1004     * then the initial sink sample rate is a route-dependent default value based on the source [sic].
1005     */
1006    public int getSampleRate() {
1007        return mSampleRate;
1008    }
1009
1010    /**
1011     * Returns the current playback sample rate rate in Hz.
1012     */
1013    public int getPlaybackRate() {
1014        return native_get_playback_rate();
1015    }
1016
1017    /**
1018     * Returns the current playback parameters.
1019     * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
1020     * @return current {@link PlaybackParams}.
1021     * @throws IllegalStateException if track is not initialized.
1022     */
1023    public @NonNull PlaybackParams getPlaybackParams() {
1024        return native_get_playback_params();
1025    }
1026
1027    /**
1028     * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
1029     * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1030     */
1031    public int getAudioFormat() {
1032        return mAudioFormat;
1033    }
1034
1035    /**
1036     * Returns the type of audio stream this AudioTrack is configured for.
1037     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
1038     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
1039     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
1040     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
1041     */
1042    public int getStreamType() {
1043        return mStreamType;
1044    }
1045
1046    /**
1047     * Returns the configured channel position mask.
1048     * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
1049     * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
1050     * This method may return {@link AudioFormat#CHANNEL_INVALID} if
1051     * a channel index mask was used. Consider
1052     * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
1053     * which contains both the channel position mask and the channel index mask.
1054     */
1055    public int getChannelConfiguration() {
1056        return mChannelConfiguration;
1057    }
1058
1059    /**
1060     * Returns the configured <code>AudioTrack</code> format.
1061     * @return an {@link AudioFormat} containing the
1062     * <code>AudioTrack</code> parameters at the time of configuration.
1063     */
1064    public @NonNull AudioFormat getFormat() {
1065        AudioFormat.Builder builder = new AudioFormat.Builder()
1066            .setSampleRate(mSampleRate)
1067            .setEncoding(mAudioFormat);
1068        if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
1069            builder.setChannelMask(mChannelConfiguration);
1070        }
1071        if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
1072            builder.setChannelIndexMask(mChannelIndexMask);
1073        }
1074        return builder.build();
1075    }
1076
1077    /**
1078     * Returns the configured number of channels.
1079     */
1080    public int getChannelCount() {
1081        return mChannelCount;
1082    }
1083
1084    /**
1085     * Returns the state of the AudioTrack instance. This is useful after the
1086     * AudioTrack instance has been created to check if it was initialized
1087     * properly. This ensures that the appropriate resources have been acquired.
1088     * @see #STATE_UNINITIALIZED
1089     * @see #STATE_INITIALIZED
1090     * @see #STATE_NO_STATIC_DATA
1091     */
1092    public int getState() {
1093        return mState;
1094    }
1095
1096    /**
1097     * Returns the playback state of the AudioTrack instance.
1098     * @see #PLAYSTATE_STOPPED
1099     * @see #PLAYSTATE_PAUSED
1100     * @see #PLAYSTATE_PLAYING
1101     */
1102    public int getPlayState() {
1103        synchronized (mPlayStateLock) {
1104            return mPlayState;
1105        }
1106    }
1107
1108
1109    /**
1110     * Returns the effective size of the <code>AudioTrack</code> buffer
1111     * that the application writes to.
1112     * <p> This will be less than or equal to the result of
1113     * {@link #getBufferCapacityInFrames()}.
1114     * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
1115     * <p> If the track is subsequently routed to a different output sink, the buffer
1116     * size and capacity may enlarge to accommodate.
1117     * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
1118     * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
1119     * the size of the native <code>AudioTrack</code> buffer in bytes.
1120     * <p> See also {@link AudioManager#getProperty(String)} for key
1121     * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
1122     * @return current size in frames of the <code>AudioTrack</code> buffer.
1123     * @throws IllegalStateException if track is not initialized.
1124     */
1125    public int getBufferSizeInFrames() {
1126        return native_get_buffer_size_frames();
1127    }
1128
1129    /**
1130     * Limits the effective size of the <code>AudioTrack</code> buffer
1131     * that the application writes to.
1132     * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
1133     * If a blocking write is used then the write will block until the the data
1134     * can fit within this limit.
1135     * <p>Changing this limit modifies the latency associated with
1136     * the buffer for this track. A smaller size will give lower latency
1137     * but there may be more glitches due to buffer underruns.
1138     * <p>The actual size used may not be equal to this requested size.
1139     * It will be limited to a valid range with a maximum of
1140     * {@link #getBufferCapacityInFrames()}.
1141     * It may also be adjusted slightly for internal reasons.
1142     * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
1143     * will be returned.
1144     * <p>This method is only supported for PCM audio.
1145     * It is not supported for compressed audio tracks.
1146     *
1147     * @param bufferSizeInFrames requested buffer size
1148     * @return the actual buffer size in frames or an error code,
1149     *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1150     * @throws IllegalStateException if track is not initialized.
1151     */
1152    public int setBufferSizeInFrames(int bufferSizeInFrames) {
1153        if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
1154            return ERROR_INVALID_OPERATION;
1155        }
1156        if (bufferSizeInFrames < 0) {
1157            return ERROR_BAD_VALUE;
1158        }
1159        return native_set_buffer_size_frames(bufferSizeInFrames);
1160    }
1161
1162    /**
1163     *  Returns the maximum size of the native <code>AudioTrack</code> buffer.
1164     *  <p> If the track's creation mode is {@link #MODE_STATIC},
1165     *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
1166     *  A static track's native frame count will not change.
1167     *  <p> If the track's creation mode is {@link #MODE_STREAM},
1168     *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
1169     *  For streaming tracks, this value may be rounded up to a larger value if needed by
1170     *  the target output sink, and
1171     *  if the track is subsequently routed to a different output sink, the native
1172     *  frame count may enlarge to accommodate.
1173     *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
1174     *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
1175     *  the size of the native <code>AudioTrack</code> buffer in bytes.
1176     *  <p> See also {@link AudioManager#getProperty(String)} for key
1177     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
1178     *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
1179     *  @throws IllegalStateException if track is not initialized.
1180     */
1181    public int getBufferCapacityInFrames() {
1182        return native_get_buffer_capacity_frames();
1183    }
1184
1185    /**
1186     *  Returns the frame count of the native <code>AudioTrack</code> buffer.
1187     *  @return current size in frames of the <code>AudioTrack</code> buffer.
1188     *  @throws IllegalStateException
1189     *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
1190     */
1191    @Deprecated
1192    protected int getNativeFrameCount() {
1193        return native_get_buffer_capacity_frames();
1194    }
1195
1196    /**
1197     * Returns marker position expressed in frames.
1198     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
1199     * or zero if marker is disabled.
1200     */
1201    public int getNotificationMarkerPosition() {
1202        return native_get_marker_pos();
1203    }
1204
1205    /**
1206     * Returns the notification update period expressed in frames.
1207     * Zero means that no position update notifications are being delivered.
1208     */
1209    public int getPositionNotificationPeriod() {
1210        return native_get_pos_update_period();
1211    }
1212
1213    /**
1214     * Returns the playback head position expressed in frames.
1215     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
1216     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
1217     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
1218     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
1219     * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
1220     * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
1221     * the total number of frames played since reset,
1222     * <i>not</i> the current offset within the buffer.
1223     */
1224    public int getPlaybackHeadPosition() {
1225        return native_get_position();
1226    }
1227
1228    /**
1229     * Returns this track's estimated latency in milliseconds. This includes the latency due
1230     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
1231     *
1232     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
1233     * a better solution.
1234     * @hide
1235     */
1236    public int getLatency() {
1237        return native_get_latency();
1238    }
1239
1240    /**
1241     * Returns the number of underrun occurrences in the application-level write buffer
1242     * since the AudioTrack was created.
1243     * An underrun occurs if the application does not write audio
1244     * data quickly enough, causing the buffer to underflow
1245     * and a potential audio glitch or pop.
1246     * <p>
1247     * Underruns are less likely when buffer sizes are large.
1248     * It may be possible to eliminate underruns by recreating the AudioTrack with
1249     * a larger buffer.
1250     * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
1251     * effective size of the buffer.
1252     */
1253    public int getUnderrunCount() {
1254        return native_get_underrun_count();
1255    }
1256
1257    /**
1258     *  Returns the output sample rate in Hz for the specified stream type.
1259     */
1260    static public int getNativeOutputSampleRate(int streamType) {
1261        return native_get_output_sample_rate(streamType);
1262    }
1263
1264    /**
1265     * Returns the minimum buffer size required for the successful creation of an AudioTrack
1266     * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
1267     * guarantee a smooth playback under load, and higher values should be chosen according to
1268     * the expected frequency at which the buffer will be refilled with additional data to play.
1269     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
1270     * to a higher value than the initial source sample rate, be sure to configure the buffer size
1271     * based on the highest planned sample rate.
1272     * @param sampleRateInHz the source sample rate expressed in Hz.
1273     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
1274     * @param channelConfig describes the configuration of the audio channels.
1275     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
1276     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
1277     * @param audioFormat the format in which the audio data is represented.
1278     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
1279     *   {@link AudioFormat#ENCODING_PCM_8BIT},
1280     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1281     * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
1282     *   or {@link #ERROR} if unable to query for output properties,
1283     *   or the minimum buffer size expressed in bytes.
1284     */
1285    static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
1286        int channelCount = 0;
1287        switch(channelConfig) {
1288        case AudioFormat.CHANNEL_OUT_MONO:
1289        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
1290            channelCount = 1;
1291            break;
1292        case AudioFormat.CHANNEL_OUT_STEREO:
1293        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
1294            channelCount = 2;
1295            break;
1296        default:
1297            if (!isMultichannelConfigSupported(channelConfig)) {
1298                loge("getMinBufferSize(): Invalid channel configuration.");
1299                return ERROR_BAD_VALUE;
1300            } else {
1301                channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1302            }
1303        }
1304
1305        if (!AudioFormat.isPublicEncoding(audioFormat)) {
1306            loge("getMinBufferSize(): Invalid audio format.");
1307            return ERROR_BAD_VALUE;
1308        }
1309
1310        // sample rate, note these values are subject to change
1311        // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
1312        if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
1313                (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
1314            loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
1315            return ERROR_BAD_VALUE;
1316        }
1317
1318        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
1319        if (size <= 0) {
1320            loge("getMinBufferSize(): error querying hardware");
1321            return ERROR;
1322        }
1323        else {
1324            return size;
1325        }
1326    }
1327
1328    /**
1329     * Returns the audio session ID.
1330     *
1331     * @return the ID of the audio session this AudioTrack belongs to.
1332     */
1333    public int getAudioSessionId() {
1334        return mSessionId;
1335    }
1336
1337   /**
1338    * Poll for a timestamp on demand.
1339    * <p>
1340    * If you need to track timestamps during initial warmup or after a routing or mode change,
1341    * you should request a new timestamp periodically until the reported timestamps
1342    * show that the frame position is advancing, or until it becomes clear that
1343    * timestamps are unavailable for this route.
1344    * <p>
1345    * After the clock is advancing at a stable rate,
1346    * query for a new timestamp approximately once every 10 seconds to once per minute.
1347    * Calling this method more often is inefficient.
1348    * It is also counter-productive to call this method more often than recommended,
1349    * because the short-term differences between successive timestamp reports are not meaningful.
1350    * If you need a high-resolution mapping between frame position and presentation time,
1351    * consider implementing that at application level, based on low-resolution timestamps.
1352    * <p>
1353    * The audio data at the returned position may either already have been
1354    * presented, or may have not yet been presented but is committed to be presented.
1355    * It is not possible to request the time corresponding to a particular position,
1356    * or to request the (fractional) position corresponding to a particular time.
1357    * If you need such features, consider implementing them at application level.
1358    *
1359    * @param timestamp a reference to a non-null AudioTimestamp instance allocated
1360    *        and owned by caller.
1361    * @return true if a timestamp is available, or false if no timestamp is available.
1362    *         If a timestamp if available,
1363    *         the AudioTimestamp instance is filled in with a position in frame units, together
1364    *         with the estimated time when that frame was presented or is committed to
1365    *         be presented.
1366    *         In the case that no timestamp is available, any supplied instance is left unaltered.
1367    *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
1368    *         or during and immediately after a route change.
1369    *         A timestamp is permanently unavailable for a given route if the route does not support
1370    *         timestamps.  In this case, the approximate frame position can be obtained
1371    *         using {@link #getPlaybackHeadPosition}.
1372    *         However, it may be useful to continue to query for
1373    *         timestamps occasionally, to recover after a route change.
1374    */
1375    // Add this text when the "on new timestamp" API is added:
1376    //   Use if you need to get the most recent timestamp outside of the event callback handler.
1377    public boolean getTimestamp(AudioTimestamp timestamp)
1378    {
1379        if (timestamp == null) {
1380            throw new IllegalArgumentException();
1381        }
1382        // It's unfortunate, but we have to either create garbage every time or use synchronized
1383        long[] longArray = new long[2];
1384        int ret = native_get_timestamp(longArray);
1385        if (ret != SUCCESS) {
1386            return false;
1387        }
1388        timestamp.framePosition = longArray[0];
1389        timestamp.nanoTime = longArray[1];
1390        return true;
1391    }
1392
1393    /**
1394     * Poll for a timestamp on demand.
1395     * <p>
1396     * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
1397     *
1398     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
1399     *        and owned by caller.
1400     * @return {@link #SUCCESS} if a timestamp is available
1401     *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
1402     *         immediately after start/ACTIVE, when the number of frames consumed is less than the
1403     *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
1404     *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
1405     *         for the timestamp.
1406     *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1407     *         needs to be recreated.
1408     *         {@link #ERROR_INVALID_OPERATION} if current route does not support
1409     *         timestamps. In this case, the approximate frame position can be obtained
1410     *         using {@link #getPlaybackHeadPosition}.
1411     *
1412     *         The AudioTimestamp instance is filled in with a position in frame units, together
1413     *         with the estimated time when that frame was presented or is committed to
1414     *         be presented.
1415     * @hide
1416     */
1417     // Add this text when the "on new timestamp" API is added:
1418     //   Use if you need to get the most recent timestamp outside of the event callback handler.
1419     public int getTimestampWithStatus(AudioTimestamp timestamp)
1420     {
1421         if (timestamp == null) {
1422             throw new IllegalArgumentException();
1423         }
1424         // It's unfortunate, but we have to either create garbage every time or use synchronized
1425         long[] longArray = new long[2];
1426         int ret = native_get_timestamp(longArray);
1427         timestamp.framePosition = longArray[0];
1428         timestamp.nanoTime = longArray[1];
1429         return ret;
1430     }
1431
1432    //--------------------------------------------------------------------------
1433    // Initialization / configuration
1434    //--------------------
1435    /**
1436     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
1437     * for each periodic playback head position update.
1438     * Notifications will be received in the same thread as the one in which the AudioTrack
1439     * instance was created.
1440     * @param listener
1441     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Delegate with a null handler, which means events are delivered on the
        // thread that created this AudioTrack (see the javadoc above).
        setPlaybackPositionUpdateListener(listener, null);
    }
1445
1446    /**
1447     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
1448     * for each periodic playback head position update.
1449     * Use this method to receive AudioTrack events in the Handler associated with another
1450     * thread than the one in which you created the AudioTrack instance.
1451     * @param listener
1452     * @param handler the Handler that will receive the event notification messages.
1453     */
1454    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
1455                                                    Handler handler) {
1456        if (listener != null) {
1457            mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
1458        } else {
1459            mEventHandlerDelegate = null;
1460        }
1461    }
1462
1463
1464    private static float clampGainOrLevel(float gainOrLevel) {
1465        if (Float.isNaN(gainOrLevel)) {
1466            throw new IllegalArgumentException();
1467        }
1468        if (gainOrLevel < GAIN_MIN) {
1469            gainOrLevel = GAIN_MIN;
1470        } else if (gainOrLevel > GAIN_MAX) {
1471            gainOrLevel = GAIN_MAX;
1472        }
1473        return gainOrLevel;
1474    }
1475
1476
1477     /**
1478     * Sets the specified left and right output gain values on the AudioTrack.
1479     * <p>Gain values are clamped to the closed interval [0.0, max] where
1480     * max is the value of {@link #getMaxVolume}.
1481     * A value of 0.0 results in zero gain (silence), and
1482     * a value of 1.0 means unity gain (signal unchanged).
1483     * The default value is 1.0 meaning unity gain.
1484     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1485     * @param leftGain output gain for the left channel.
1486     * @param rightGain output gain for the right channel
1487     * @return error code or success, see {@link #SUCCESS},
1488     *    {@link #ERROR_INVALID_OPERATION}
1489     * @deprecated Applications should use {@link #setVolume} instead, as it
1490     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
1491     */
1492    public int setStereoVolume(float leftGain, float rightGain) {
1493        if (isRestricted()) {
1494            return SUCCESS;
1495        }
1496        if (mState == STATE_UNINITIALIZED) {
1497            return ERROR_INVALID_OPERATION;
1498        }
1499
1500        leftGain = clampGainOrLevel(leftGain);
1501        rightGain = clampGainOrLevel(rightGain);
1502
1503        native_setVolume(leftGain, rightGain);
1504
1505        return SUCCESS;
1506    }
1507
1508
1509    /**
1510     * Sets the specified output gain value on all channels of this track.
1511     * <p>Gain values are clamped to the closed interval [0.0, max] where
1512     * max is the value of {@link #getMaxVolume}.
1513     * A value of 0.0 results in zero gain (silence), and
1514     * a value of 1.0 means unity gain (signal unchanged).
1515     * The default value is 1.0 meaning unity gain.
1516     * <p>This API is preferred over {@link #setStereoVolume}, as it
1517     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
1518     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1519     * @param gain output gain for all channels.
1520     * @return error code or success, see {@link #SUCCESS},
1521     *    {@link #ERROR_INVALID_OPERATION}
1522     */
    public int setVolume(float gain) {
        // Apply the same gain to both channels via the stereo entry point,
        // which performs the clamping and state checks.
        return setStereoVolume(gain, gain);
    }
1526
1527
1528    /**
1529     * Sets the playback sample rate for this track. This sets the sampling rate at which
1530     * the audio data will be consumed and played back
1531     * (as set by the sampleRateInHz parameter in the
1532     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
1533     * not the original sampling rate of the
1534     * content. For example, setting it to half the sample rate of the content will cause the
1535     * playback to last twice as long, but will also result in a pitch shift down by one octave.
1536     * The valid sample rate range is from 1 Hz to twice the value returned by
1537     * {@link #getNativeOutputSampleRate(int)}.
1538     * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
1539     * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
1540     * for playback of content of differing sample rate,
1541     * but with identical encoding and channel mask.
1542     * @param sampleRateInHz the sample rate expressed in Hz
1543     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1544     *    {@link #ERROR_INVALID_OPERATION}
1545     */
    public int setPlaybackRate(int sampleRateInHz) {
        // Requires a fully initialized track (uninitialized or no-static-data states fail).
        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        // Non-positive rates are rejected locally; the native call reports its
        // own status for any other invalid value.
        if (sampleRateInHz <= 0) {
            return ERROR_BAD_VALUE;
        }
        return native_set_playback_rate(sampleRateInHz);
    }
1555
1556
1557    /**
1558     * Sets the playback parameters.
1559     * This method returns failure if it cannot apply the playback parameters.
1560     * One possible cause is that the parameters for speed or pitch are out of range.
1561     * Another possible cause is that the <code>AudioTrack</code> is streaming
1562     * (see {@link #MODE_STREAM}) and the
1563     * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
1564     * on configuration must be larger than the speed multiplied by the minimum size
1565     * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
1566     * @param params see {@link PlaybackParams}. In particular,
1567     * speed, pitch, and audio mode should be set.
1568     * @throws IllegalArgumentException if the parameters are invalid or not accepted.
1569     * @throws IllegalStateException if track is not initialized.
1570     */
    public void setPlaybackParams(@NonNull PlaybackParams params) {
        // Explicit runtime check: the @NonNull annotation alone is not enforced here.
        if (params == null) {
            throw new IllegalArgumentException("params is null");
        }
        native_set_playback_params(params);
    }
1577
1578
1579    /**
1580     * Sets the position of the notification marker.  At most one marker can be active.
1581     * @param markerInFrames marker position in wrapping frame units similar to
1582     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
1583     * To set a marker at a position which would appear as zero due to wraparound,
1584     * a workaround is to use a non-zero position near zero, such as -1 or 1.
1585     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1586     *  {@link #ERROR_INVALID_OPERATION}
1587     */
    public int setNotificationMarkerPosition(int markerInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        // Marker value validation and status reporting happen in the native layer.
        return native_set_marker_pos(markerInFrames);
    }
1594
1595
1596    /**
1597     * Sets the period for the periodic notification event.
1598     * @param periodInFrames update period expressed in frames.
1599     * Zero period means no position updates.  A negative period is not allowed.
1600     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
1601     */
    public int setPositionNotificationPeriod(int periodInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        // Period validation (e.g. rejecting negative values) is left to the native layer.
        return native_set_pos_update_period(periodInFrames);
    }
1608
1609
1610    /**
1611     * Sets the playback head position within the static buffer.
1612     * The track must be stopped or paused for the position to be changed,
1613     * and must use the {@link #MODE_STATIC} mode.
1614     * @param positionInFrames playback head position within buffer, expressed in frames.
1615     * Zero corresponds to start of buffer.
1616     * The position must not be greater than the buffer size in frames, or negative.
1617     * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
1618     * the position values have different meanings.
1619     * <br>
1620     * If looping is currently enabled and the new position is greater than or equal to the
1621     * loop end marker, the behavior varies by API level:
1622     * as of {@link android.os.Build.VERSION_CODES#M},
1623     * the looping is first disabled and then the position is set.
1624     * For earlier API levels, the behavior is unspecified.
1625     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1626     *    {@link #ERROR_INVALID_OPERATION}
1627     */
1628    public int setPlaybackHeadPosition(int positionInFrames) {
1629        if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1630                getPlayState() == PLAYSTATE_PLAYING) {
1631            return ERROR_INVALID_OPERATION;
1632        }
1633        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
1634            return ERROR_BAD_VALUE;
1635        }
1636        return native_set_position(positionInFrames);
1637    }
1638
1639    /**
1640     * Sets the loop points and the loop count. The loop can be infinite.
1641     * Similarly to setPlaybackHeadPosition,
1642     * the track must be stopped or paused for the loop points to be changed,
1643     * and must use the {@link #MODE_STATIC} mode.
1644     * @param startInFrames loop start marker expressed in frames.
1645     * Zero corresponds to start of buffer.
1646     * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1647     * @param endInFrames loop end marker expressed in frames.
1648     * The total buffer size in frames corresponds to end of buffer.
1649     * The end marker must not be greater than the buffer size in frames.
1650     * For looping, the end marker must not be less than or equal to the start marker,
1651     * but to disable looping
1652     * it is permitted for start marker, end marker, and loop count to all be 0.
1653     * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
1654     * If the loop period (endInFrames - startInFrames) is too small for the implementation to
1655     * support,
1656     * {@link #ERROR_BAD_VALUE} is returned.
1657     * The loop range is the interval [startInFrames, endInFrames).
1658     * <br>
1659     * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
1660     * unless it is greater than or equal to the loop end marker, in which case
1661     * it is forced to the loop start marker.
1662     * For earlier API levels, the effect on position is unspecified.
1663     * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
1664     *    A value of -1 means infinite looping, and 0 disables looping.
1665     *    A value of positive N means to "loop" (go back) N times.  For example,
1666     *    a value of one means to play the region two times in total.
1667     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1668     *    {@link #ERROR_INVALID_OPERATION}
1669     */
1670    public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1671        if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1672                getPlayState() == PLAYSTATE_PLAYING) {
1673            return ERROR_INVALID_OPERATION;
1674        }
1675        if (loopCount == 0) {
1676            ;   // explicitly allowed as an exception to the loop region range check
1677        } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1678                startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1679            return ERROR_BAD_VALUE;
1680        }
1681        return native_set_loop(startInFrames, endInFrames, loopCount);
1682    }
1683
1684    /**
1685     * Sets the initialization state of the instance. This method was originally intended to be used
1686     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
1687     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
1688     * @param state the state of the AudioTrack instance
1689     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
1690     */
    @Deprecated
    protected void setState(int state) {
        // Direct overwrite of the cached state; no validation of the value is performed.
        mState = state;
    }
1695
1696
1697    //---------------------------------------------------------
1698    // Transport control methods
1699    //--------------------
1700    /**
1701     * Starts playing an AudioTrack.
1702     * <p>
1703     * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
1704     * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
1705     * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
1706     * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
1707     * play().
1708     * <p>
1709     * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
1710     * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
1711     * If you don't call write() first, or if you call write() but with an insufficient amount of
1712     * data, then the track will be in underrun state at play().  In this case,
1713     * playback will not actually start playing until the data path is filled to a
1714     * device-specific minimum level.  This requirement for the path to be filled
1715     * to a minimum level is also true when resuming audio playback after calling stop().
1716     * Similarly the buffer will need to be filled up again after
1717     * the track underruns due to failure to call write() in a timely manner with sufficient data.
1718     * For portability, an application should prime the data path to the maximum allowed
1719     * by writing data until the write() method returns a short transfer count.
1720     * This allows play() to start immediately, and reduces the chance of underrun.
1721     *
1722     * @throws IllegalStateException if the track isn't properly initialized
1723     */
    public void play()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
        }
        // App-ops restricted: start playback anyway, but muted.
        if (isRestricted()) {
            setVolume(0);
        }
        // The lock keeps the native start and the cached play-state update atomic
        // with respect to stop()/pause().
        synchronized(mPlayStateLock) {
            native_start();
            mPlayState = PLAYSTATE_PLAYING;
        }
    }
1737
1738    private boolean isRestricted() {
1739        if ((mAttributes.getAllFlags() & AudioAttributes.FLAG_BYPASS_INTERRUPTION_POLICY) != 0) {
1740            return false;
1741        }
1742        try {
1743            final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType);
1744            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage,
1745                    Process.myUid(), ActivityThread.currentPackageName());
1746            return mode != AppOpsManager.MODE_ALLOWED;
1747        } catch (RemoteException e) {
1748            return false;
1749        }
1750    }
1751
1752    /**
1753     * Stops playing the audio data.
1754     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
1755     * after the last buffer that was written has been played. For an immediate stop, use
1756     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
1757     * back yet.
1758     * @throws IllegalStateException
1759     */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            native_stop();
            mPlayState = PLAYSTATE_STOPPED;
            // Clear AV-sync write state; flush() does the same.
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
        }
    }
1774
1775    /**
1776     * Pauses the playback of the audio data. Data that has not been played
1777     * back will not be discarded. Subsequent calls to {@link #play} will play
1778     * this data back. See {@link #flush()} to discard this data.
1779     *
1780     * @throws IllegalStateException
1781     */
    public void pause()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
        }
        //logd("pause()");

        // pause playback
        // Note: unlike stop(), the AV-sync write state is preserved across pause.
        synchronized(mPlayStateLock) {
            native_pause();
            mPlayState = PLAYSTATE_PAUSED;
        }
    }
1795
1796
1797    //---------------------------------------------------------
1798    // Audio data supply
1799    //--------------------
1800
1801    /**
1802     * Flushes the audio data currently queued for playback. Any data that has
1803     * been written but not yet presented will be discarded.  No-op if not stopped or paused,
1804     * or if the track's creation mode is not {@link #MODE_STREAM}.
1805     * <BR> Note that although data written but not yet presented is discarded, there is no
1806     * guarantee that all of the buffer space formerly used by that data
1807     * is available for a subsequent write.
1808     * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
1809     * less than or equal to the total buffer size
1810     * may return a short actual transfer count.
1811     */
1812    public void flush() {
1813        if (mState == STATE_INITIALIZED) {
1814            // flush the data in native layer
1815            native_flush();
1816            mAvSyncHeader = null;
1817            mAvSyncBytesRemaining = 0;
1818        }
1819
1820    }
1821
1822    /**
1823     * Writes the audio data to the audio sink for playback (streaming mode),
1824     * or copies audio data for later playback (static buffer mode).
1825     * The format specified in the AudioTrack constructor should be
1826     * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
1827     * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
1828     * <p>
1829     * In streaming mode, the write will normally block until all the data has been enqueued for
1830     * playback, and will return a full transfer count.  However, if the track is stopped or paused
1831     * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
1832     * occurs during the write, then the write may return a short transfer count.
1833     * <p>
1834     * In static buffer mode, copies the data to the buffer starting at offset 0.
1835     * Note that the actual playback of this data might occur after this function returns.
1836     *
1837     * @param audioData the array that holds the data to play.
1838     * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
1839     *    starts.
1840     *    Must not be negative, or cause the data access to go out of bounds of the array.
1841     * @param sizeInBytes the number of bytes to write in audioData after the offset.
1842     *    Must not be negative, or cause the data access to go out of bounds of the array.
1843     * @return zero or the positive number of bytes that were written, or
1844     *    {@link #ERROR_INVALID_OPERATION}
1845     *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1846     *    the parameters don't resolve to valid data and indexes, or
1847     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1848     *    needs to be recreated.
1849     *    The dead object error code is not returned if some data was successfully transferred.
1850     *    In this case, the error is returned at the next write().
1851     *    The number of bytes will be a multiple of the frame size in bytes
1852     *    not to exceed sizeInBytes.
1853     *
1854     * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
1855     * set to  {@link #WRITE_BLOCKING}.
1856     */
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
        // Blocking convenience overload; see write(byte[], int, int, int).
        return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
    }
1860
1861    /**
1862     * Writes the audio data to the audio sink for playback (streaming mode),
1863     * or copies audio data for later playback (static buffer mode).
1864     * The format specified in the AudioTrack constructor should be
1865     * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
1866     * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
1867     * <p>
1868     * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
1869     * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
1870     * for playback, and will return a full transfer count.  However, if the write mode is
1871     * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
1872     * interrupts the write by calling stop or pause, or an I/O error
1873     * occurs during the write, then the write may return a short transfer count.
1874     * <p>
1875     * In static buffer mode, copies the data to the buffer starting at offset 0,
1876     * and the write mode is ignored.
1877     * Note that the actual playback of this data might occur after this function returns.
1878     *
1879     * @param audioData the array that holds the data to play.
1880     * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
1881     *    starts.
1882     *    Must not be negative, or cause the data access to go out of bounds of the array.
1883     * @param sizeInBytes the number of bytes to write in audioData after the offset.
1884     *    Must not be negative, or cause the data access to go out of bounds of the array.
1885     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1886     *     effect in static mode.
1887     *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1888     *         to the audio sink.
1889     *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1890     *     queuing as much audio data for playback as possible without blocking.
1891     * @return zero or the positive number of bytes that were written, or
1892     *    {@link #ERROR_INVALID_OPERATION}
1893     *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1894     *    the parameters don't resolve to valid data and indexes, or
1895     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1896     *    needs to be recreated.
1897     *    The dead object error code is not returned if some data was successfully transferred.
1898     *    In this case, the error is returned at the next write().
1899     *    The number of bytes will be a multiple of the frame size in bytes
1900     *    not to exceed sizeInBytes.
1901     */
1902    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
1903            @WriteMode int writeMode) {
1904
1905        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1906            return ERROR_INVALID_OPERATION;
1907        }
1908
1909        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1910            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1911            return ERROR_BAD_VALUE;
1912        }
1913
1914        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
1915                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
1916                || (offsetInBytes + sizeInBytes > audioData.length)) {
1917            return ERROR_BAD_VALUE;
1918        }
1919
1920        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
1921                writeMode == WRITE_BLOCKING);
1922
1923        if ((mDataLoadMode == MODE_STATIC)
1924                && (mState == STATE_NO_STATIC_DATA)
1925                && (ret > 0)) {
1926            // benign race with respect to other APIs that read mState
1927            mState = STATE_INITIALIZED;
1928        }
1929
1930        return ret;
1931    }
1932
1933    /**
1934     * Writes the audio data to the audio sink for playback (streaming mode),
1935     * or copies audio data for later playback (static buffer mode).
1936     * The format specified in the AudioTrack constructor should be
1937     * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
1938     * <p>
1939     * In streaming mode, the write will normally block until all the data has been enqueued for
1940     * playback, and will return a full transfer count.  However, if the track is stopped or paused
1941     * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
1942     * occurs during the write, then the write may return a short transfer count.
1943     * <p>
1944     * In static buffer mode, copies the data to the buffer starting at offset 0.
1945     * Note that the actual playback of this data might occur after this function returns.
1946     *
1947     * @param audioData the array that holds the data to play.
1948     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1949     *     starts.
1950     *    Must not be negative, or cause the data access to go out of bounds of the array.
1951     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1952     *    Must not be negative, or cause the data access to go out of bounds of the array.
1953     * @return zero or the positive number of shorts that were written, or
1954     *    {@link #ERROR_INVALID_OPERATION}
1955     *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1956     *    the parameters don't resolve to valid data and indexes, or
1957     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1958     *    needs to be recreated.
1959     *    The dead object error code is not returned if some data was successfully transferred.
1960     *    In this case, the error is returned at the next write().
1961     *    The number of shorts will be a multiple of the channel count not to exceed sizeInShorts.
1962     *
1963     * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
1964     * set to  {@link #WRITE_BLOCKING}.
1965     */
1966    public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
1967        return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
1968    }
1969
1970    /**
1971     * Writes the audio data to the audio sink for playback (streaming mode),
1972     * or copies audio data for later playback (static buffer mode).
1973     * The format specified in the AudioTrack constructor should be
1974     * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
1975     * <p>
1976     * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
1977     * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
1978     * for playback, and will return a full transfer count.  However, if the write mode is
1979     * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
1980     * interrupts the write by calling stop or pause, or an I/O error
1981     * occurs during the write, then the write may return a short transfer count.
1982     * <p>
1983     * In static buffer mode, copies the data to the buffer starting at offset 0.
1984     * Note that the actual playback of this data might occur after this function returns.
1985     *
1986     * @param audioData the array that holds the data to write.
1987     * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
1988     *     starts.
1989     *    Must not be negative, or cause the data access to go out of bounds of the array.
1990     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1991     *    Must not be negative, or cause the data access to go out of bounds of the array.
1992     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1993     *     effect in static mode.
1994     *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1995     *         to the audio sink.
1996     *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1997     *     queuing as much audio data for playback as possible without blocking.
1998     * @return zero or the positive number of shorts that were written, or
1999     *    {@link #ERROR_INVALID_OPERATION}
2000     *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
2001     *    the parameters don't resolve to valid data and indexes, or
2002     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2003     *    needs to be recreated.
2004     *    The dead object error code is not returned if some data was successfully transferred.
2005     *    In this case, the error is returned at the next write().
2006     *    The number of shorts will be a multiple of the channel count not to exceed sizeInShorts.
2007     */
2008    public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
2009            @WriteMode int writeMode) {
2010
2011        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
2012            return ERROR_INVALID_OPERATION;
2013        }
2014
2015        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2016            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2017            return ERROR_BAD_VALUE;
2018        }
2019
2020        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
2021                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
2022                || (offsetInShorts + sizeInShorts > audioData.length)) {
2023            return ERROR_BAD_VALUE;
2024        }
2025
2026        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
2027                writeMode == WRITE_BLOCKING);
2028
2029        if ((mDataLoadMode == MODE_STATIC)
2030                && (mState == STATE_NO_STATIC_DATA)
2031                && (ret > 0)) {
2032            // benign race with respect to other APIs that read mState
2033            mState = STATE_INITIALIZED;
2034        }
2035
2036        return ret;
2037    }
2038
2039    /**
2040     * Writes the audio data to the audio sink for playback (streaming mode),
2041     * or copies audio data for later playback (static buffer mode).
2042     * The format specified in the AudioTrack constructor should be
2043     * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
2044     * <p>
2045     * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2046     * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2047     * for playback, and will return a full transfer count.  However, if the write mode is
2048     * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2049     * interrupts the write by calling stop or pause, or an I/O error
2050     * occurs during the write, then the write may return a short transfer count.
2051     * <p>
2052     * In static buffer mode, copies the data to the buffer starting at offset 0,
2053     * and the write mode is ignored.
2054     * Note that the actual playback of this data might occur after this function returns.
2055     *
2056     * @param audioData the array that holds the data to write.
2057     *     The implementation does not clip for sample values within the nominal range
2058     *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
2059     *     less than or equal to unity (1.0f), and in the absence of post-processing effects
2060     *     that could add energy, such as reverb.  For the convenience of applications
2061     *     that compute samples using filters with non-unity gain,
2062     *     sample values +3 dB beyond the nominal range are permitted.
2063     *     However such values may eventually be limited or clipped, depending on various gains
2064     *     and later processing in the audio path.  Therefore applications are encouraged
2065     *     to provide samples values within the nominal range.
2066     * @param offsetInFloats the offset, expressed as a number of floats,
2067     *     in audioData where the data to write starts.
2068     *    Must not be negative, or cause the data access to go out of bounds of the array.
2069     * @param sizeInFloats the number of floats to write in audioData after the offset.
2070     *    Must not be negative, or cause the data access to go out of bounds of the array.
2071     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2072     *     effect in static mode.
2073     *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2074     *         to the audio sink.
2075     *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2076     *     queuing as much audio data for playback as possible without blocking.
2077     * @return zero or the positive number of floats that were written, or
2078     *    {@link #ERROR_INVALID_OPERATION}
2079     *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
2080     *    the parameters don't resolve to valid data and indexes, or
2081     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2082     *    needs to be recreated.
2083     *    The dead object error code is not returned if some data was successfully transferred.
2084     *    In this case, the error is returned at the next write().
2085     *    The number of floats will be a multiple of the channel count not to exceed sizeInFloats.
2086     */
2087    public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
2088            @WriteMode int writeMode) {
2089
2090        if (mState == STATE_UNINITIALIZED) {
2091            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
2092            return ERROR_INVALID_OPERATION;
2093        }
2094
2095        if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
2096            Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
2097            return ERROR_INVALID_OPERATION;
2098        }
2099
2100        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2101            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2102            return ERROR_BAD_VALUE;
2103        }
2104
2105        if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
2106                || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
2107                || (offsetInFloats + sizeInFloats > audioData.length)) {
2108            Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
2109            return ERROR_BAD_VALUE;
2110        }
2111
2112        int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
2113                writeMode == WRITE_BLOCKING);
2114
2115        if ((mDataLoadMode == MODE_STATIC)
2116                && (mState == STATE_NO_STATIC_DATA)
2117                && (ret > 0)) {
2118            // benign race with respect to other APIs that read mState
2119            mState = STATE_INITIALIZED;
2120        }
2121
2122        return ret;
2123    }
2124
2125
2126    /**
2127     * Writes the audio data to the audio sink for playback (streaming mode),
2128     * or copies audio data for later playback (static buffer mode).
2129     * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
2130     * <p>
2131     * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2132     * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2133     * for playback, and will return a full transfer count.  However, if the write mode is
2134     * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2135     * interrupts the write by calling stop or pause, or an I/O error
2136     * occurs during the write, then the write may return a short transfer count.
2137     * <p>
2138     * In static buffer mode, copies the data to the buffer starting at offset 0,
2139     * and the write mode is ignored.
2140     * Note that the actual playback of this data might occur after this function returns.
2141     *
2142     * @param audioData the buffer that holds the data to write, starting at the position reported
2143     *     by <code>audioData.position()</code>.
2144     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
2145     *     have been advanced to reflect the amount of data that was successfully written to
2146     *     the AudioTrack.
2147     * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
2148     *     that the number of bytes requested be a multiple of the frame size (sample size in
2149     *     bytes multiplied by the channel count).
2150     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
2151     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2152     *     effect in static mode.
2153     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2154     *         to the audio sink.
2155     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2156     *     queuing as much audio data for playback as possible without blocking.
2157     * @return zero or the positive number of bytes that were written, or
2158     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
2159     *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2160     *     needs to be recreated.
2161     *     The dead object error code is not returned if some data was successfully transferred.
2162     *     In this case, the error is returned at the next write().
2163     */
2164    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
2165            @WriteMode int writeMode) {
2166
2167        if (mState == STATE_UNINITIALIZED) {
2168            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
2169            return ERROR_INVALID_OPERATION;
2170        }
2171
2172        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2173            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2174            return ERROR_BAD_VALUE;
2175        }
2176
2177        if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
2178            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
2179            return ERROR_BAD_VALUE;
2180        }
2181
2182        int ret = 0;
2183        if (audioData.isDirect()) {
2184            ret = native_write_native_bytes(audioData,
2185                    audioData.position(), sizeInBytes, mAudioFormat,
2186                    writeMode == WRITE_BLOCKING);
2187        } else {
2188            ret = native_write_byte(NioUtils.unsafeArray(audioData),
2189                    NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
2190                    sizeInBytes, mAudioFormat,
2191                    writeMode == WRITE_BLOCKING);
2192        }
2193
2194        if ((mDataLoadMode == MODE_STATIC)
2195                && (mState == STATE_NO_STATIC_DATA)
2196                && (ret > 0)) {
2197            // benign race with respect to other APIs that read mState
2198            mState = STATE_INITIALIZED;
2199        }
2200
2201        if (ret > 0) {
2202            audioData.position(audioData.position() + ret);
2203        }
2204
2205        return ret;
2206    }
2207
2208    /**
2209     * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
2210     * The blocking behavior will depend on the write mode.
2211     * @param audioData the buffer that holds the data to write, starting at the position reported
2212     *     by <code>audioData.position()</code>.
2213     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
2214     *     have been advanced to reflect the amount of data that was successfully written to
2215     *     the AudioTrack.
2216     * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
2217     *     that the number of bytes requested be a multiple of the frame size (sample size in
2218     *     bytes multiplied by the channel count).
2219     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
2220     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
2221     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2222     *         to the audio sink.
2223     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2224     *     queuing as much audio data for playback as possible without blocking.
2225     * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
2226     * @return zero or a positive number of bytes that were written, or
2227     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
2228     *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2229     *     needs to be recreated.
2230     *     The dead object error code is not returned if some data was successfully transferred.
2231     *     In this case, the error is returned at the next write().
2232     */
2233    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
2234            @WriteMode int writeMode, long timestamp) {
2235
2236        if (mState == STATE_UNINITIALIZED) {
2237            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
2238            return ERROR_INVALID_OPERATION;
2239        }
2240
2241        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2242            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2243            return ERROR_BAD_VALUE;
2244        }
2245
2246        if (mDataLoadMode != MODE_STREAM) {
2247            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
2248            return ERROR_INVALID_OPERATION;
2249        }
2250
2251        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
2252            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
2253            return write(audioData, sizeInBytes, writeMode);
2254        }
2255
2256        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
2257            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
2258            return ERROR_BAD_VALUE;
2259        }
2260
2261        // create timestamp header if none exists
2262        if (mAvSyncHeader == null) {
2263            mAvSyncHeader = ByteBuffer.allocate(16);
2264            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
2265            mAvSyncHeader.putInt(0x55550001);
2266            mAvSyncHeader.putInt(sizeInBytes);
2267            mAvSyncHeader.putLong(timestamp);
2268            mAvSyncHeader.position(0);
2269            mAvSyncBytesRemaining = sizeInBytes;
2270        }
2271
2272        // write timestamp header if not completely written already
2273        int ret = 0;
2274        if (mAvSyncHeader.remaining() != 0) {
2275            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
2276            if (ret < 0) {
2277                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
2278                mAvSyncHeader = null;
2279                mAvSyncBytesRemaining = 0;
2280                return ret;
2281            }
2282            if (mAvSyncHeader.remaining() > 0) {
2283                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
2284                return 0;
2285            }
2286        }
2287
2288        // write audio data
2289        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
2290        ret = write(audioData, sizeToWrite, writeMode);
2291        if (ret < 0) {
2292            Log.e(TAG, "AudioTrack.write() could not write audio data!");
2293            mAvSyncHeader = null;
2294            mAvSyncBytesRemaining = 0;
2295            return ret;
2296        }
2297
2298        mAvSyncBytesRemaining -= ret;
2299        if (mAvSyncBytesRemaining == 0) {
2300            mAvSyncHeader = null;
2301        }
2302
2303        return ret;
2304    }
2305
2306
2307    /**
2308     * Sets the playback head position within the static buffer to zero,
2309     * that is it rewinds to start of static buffer.
2310     * The track must be stopped or paused, and
2311     * the track's creation mode must be {@link #MODE_STATIC}.
2312     * <p>
2313     * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
2314     * {@link #getPlaybackHeadPosition()} to zero.
2315     * For earlier API levels, the reset behavior is unspecified.
2316     * <p>
2317     * Use {@link #setPlaybackHeadPosition(int)} with a zero position
2318     * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
2319     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2320     *  {@link #ERROR_INVALID_OPERATION}
2321     */
2322    public int reloadStaticData() {
2323        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
2324            return ERROR_INVALID_OPERATION;
2325        }
2326        return native_reload_static();
2327    }
2328
2329    //--------------------------------------------------------------------------
2330    // Audio effects management
2331    //--------------------
2332
2333    /**
2334     * Attaches an auxiliary effect to the audio track. A typical auxiliary
2335     * effect is a reverberation effect which can be applied on any sound source
2336     * that directs a certain amount of its energy to this effect. This amount
2337     * is defined by setAuxEffectSendLevel().
2338     * {@see #setAuxEffectSendLevel(float)}.
2339     * <p>After creating an auxiliary effect (e.g.
2340     * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
2341     * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
2342     * this method to attach the audio track to the effect.
2343     * <p>To detach the effect from the audio track, call this method with a
2344     * null effect id.
2345     *
2346     * @param effectId system wide unique id of the effect to attach
2347     * @return error code or success, see {@link #SUCCESS},
2348     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
2349     */
2350    public int attachAuxEffect(int effectId) {
2351        if (mState == STATE_UNINITIALIZED) {
2352            return ERROR_INVALID_OPERATION;
2353        }
2354        return native_attachAuxEffect(effectId);
2355    }
2356
2357    /**
2358     * Sets the send level of the audio track to the attached auxiliary effect
2359     * {@link #attachAuxEffect(int)}.  Effect levels
2360     * are clamped to the closed interval [0.0, max] where
2361     * max is the value of {@link #getMaxVolume}.
2362     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
2363     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
2364     * this method must be called for the effect to be applied.
2365     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
2366     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
2367     * so an appropriate conversion from linear UI input x to level is:
2368     * x == 0 -&gt; level = 0
2369     * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
2370     *
2371     * @param level linear send level
2372     * @return error code or success, see {@link #SUCCESS},
2373     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
2374     */
2375    public int setAuxEffectSendLevel(float level) {
2376        if (isRestricted()) {
2377            return SUCCESS;
2378        }
2379        if (mState == STATE_UNINITIALIZED) {
2380            return ERROR_INVALID_OPERATION;
2381        }
2382        level = clampGainOrLevel(level);
2383        int err = native_setAuxEffectSendLevel(level);
2384        return err == 0 ? SUCCESS : ERROR;
2385    }
2386
2387    //--------------------------------------------------------------------------
2388    // Explicit Routing
2389    //--------------------
    // Device explicitly selected via setPreferredDevice(); read and written under
    // synchronized (this).
    private AudioDeviceInfo mPreferredDevice = null;
2391
2392    /**
2393     * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
2394     * the output from this AudioTrack.
2395     * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
2396     *  If deviceInfo is null, default routing is restored.
2397     * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
2398     * does not correspond to a valid audio output device.
2399     */
2400    public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
2401        // Do some validation....
2402        if (deviceInfo != null && !deviceInfo.isSink()) {
2403            return false;
2404        }
2405        int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
2406        boolean status = native_setOutputDevice(preferredDeviceId);
2407        if (status == true) {
2408            synchronized (this) {
2409                mPreferredDevice = deviceInfo;
2410            }
2411        }
2412        return status;
2413    }
2414
2415    /**
2416     * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
2417     * is not guaranteed to correspond to the actual device being used for playback.
2418     */
2419    public AudioDeviceInfo getPreferredDevice() {
2420        synchronized (this) {
2421            return mPreferredDevice;
2422        }
2423    }
2424
2425    /**
2426     * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
2427     * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
2428     * <code>getRoutedDevice()</code> will return null.
2429     */
2430    public AudioDeviceInfo getRoutedDevice() {
2431        int deviceId = native_getRoutedDeviceId();
2432        if (deviceId == 0) {
2433            return null;
2434        }
2435        AudioDeviceInfo[] devices =
2436                AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS);
2437        for (int i = 0; i < devices.length; i++) {
2438            if (devices[i].getId() == deviceId) {
2439                return devices[i];
2440            }
2441        }
2442        return null;
2443    }
2444
2445    /*
2446     * Call BEFORE adding a routing callback handler.
2447     */
2448    private void testEnableNativeRoutingCallbacks() {
2449        if (mRoutingChangeListeners.size() == 0 && mNewRoutingChangeListeners.size() == 0) {
2450            native_enableDeviceCallback();
2451        }
2452    }
2453
2454    /*
2455     * Call AFTER removing a routing callback handler.
2456     */
2457    private void testDisableNativeRoutingCallbacks() {
2458        if (mRoutingChangeListeners.size() == 0 && mNewRoutingChangeListeners.size() == 0) {
2459            native_disableDeviceCallback();
2460        }
2461    }
2462
2463    //--------------------------------------------------------------------------
2464    // >= "N" (Re)Routing Info
2465    //--------------------
2466    /**
2467     * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
2468     * {@link AudioTrack#addOnRoutingListener(AudioRouting.OnRoutingChangedListener,
2469     *          android.os.Handler)}
2470     * by an app to receive (re)routing notifications.
2471     */
2472   private ArrayMap<AudioRouting.OnRoutingChangedListener, NativeNewRoutingEventHandlerDelegate>
2473    mNewRoutingChangeListeners =
2474        new ArrayMap<AudioRouting.OnRoutingChangedListener, NativeNewRoutingEventHandlerDelegate>();
2475
2476   /**
2477    * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
2478    * changes on this AudioTrack.
2479    * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
2480    * notifications of rerouting events.
2481    * @param handler  Specifies the {@link Handler} object for the thread on which to execute
2482    * the callback. If <code>null</code>, the {@link Handler} associated with the main
2483    * {@link Looper} will be used.
2484    */
2485    public void addOnRoutingListener(AudioRouting.OnRoutingChangedListener listener,
2486            Handler handler) {
2487        if (listener != null && !mNewRoutingChangeListeners.containsKey(listener)) {
2488            synchronized (mNewRoutingChangeListeners) {
2489                testEnableNativeRoutingCallbacks();
2490                mNewRoutingChangeListeners.put(
2491                    listener, new NativeNewRoutingEventHandlerDelegate(this, listener,
2492                            handler != null ? handler : new Handler(mInitializationLooper)));
2493            }
2494        }
2495    }
2496
2497    /**
2498     * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
2499     * to receive rerouting notifications.
2500     * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
2501     * to remove.
2502     */
2503    public void removeOnRoutingListener(AudioRouting.OnRoutingChangedListener listener) {
2504        if (mNewRoutingChangeListeners.containsKey(listener)) {
2505            mNewRoutingChangeListeners.remove(listener);
2506        }
2507        testDisableNativeRoutingCallbacks();
2508    }
2509
2510    //--------------------------------------------------------------------------
2511    // Marshmallow (Re)Routing Info
2512    //--------------------
2513    /**
2514     * Defines the interface by which applications can receive notifications of routing
2515     * changes for the associated {@link AudioTrack}.
2516     */
2517    @Deprecated
2518    public interface OnRoutingChangedListener {
2519        /**
2520         * Called when the routing of an AudioTrack changes from either and explicit or
2521         * policy rerouting.  Use {@link #getRoutedDevice()} to retrieve the newly routed-to
2522         * device.
2523         */
2524        @Deprecated
2525        public void onRoutingChanged(AudioTrack audioTrack);
2526    }
2527
2528    /**
2529     * The list of AudioTrack.OnRoutingChangedListener interfaces added (with
2530     * {@link AudioTrack#addOnRoutingChangedListener(OnRoutingChangedListener, android.os.Handler)}
2531     * by an app to receive (re)routing notifications.
2532     */
2533    private ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate>
2534        mRoutingChangeListeners =
2535            new ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate>();
2536
2537    /**
2538     * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
2539     * on this AudioTrack.
2540     * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
2541     * of rerouting events.
2542     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
2543     * the callback. If <code>null</code>, the {@link Handler} associated with the main
2544     * {@link Looper} will be used.
2545     */
2546    @Deprecated
2547    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
2548            android.os.Handler handler) {
2549        if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
2550            synchronized (mRoutingChangeListeners) {
2551                testEnableNativeRoutingCallbacks();
2552                mRoutingChangeListeners.put(
2553                    listener, new NativeRoutingEventHandlerDelegate(this, listener,
2554                            handler != null ? handler : new Handler(mInitializationLooper)));
2555            }
2556        }
2557    }
2558
2559    /**
2560     * Removes an {@link OnRoutingChangedListener} which has been previously added
2561     * to receive rerouting notifications.
2562     * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
2563     */
2564    @Deprecated
2565    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
2566        synchronized (mRoutingChangeListeners) {
2567            if (mRoutingChangeListeners.containsKey(listener)) {
2568                mRoutingChangeListeners.remove(listener);
2569            }
2570            testDisableNativeRoutingCallbacks();
2571        }
2572    }
2573
2574    /**
2575     * Sends device list change notification to all listeners.
2576     */
2577    private void broadcastRoutingChange() {
2578        AudioManager.resetAudioPortGeneration();
2579
2580        // Marshmallow Routing
2581        Collection<NativeRoutingEventHandlerDelegate> values;
2582        synchronized (mRoutingChangeListeners) {
2583            values = mRoutingChangeListeners.values();
2584        }
2585        for(NativeRoutingEventHandlerDelegate delegate : values) {
2586            Handler handler = delegate.getHandler();
2587            if (handler != null) {
2588                handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE);
2589            }
2590        }
2591        // >= "N" Routing
2592        Collection<NativeNewRoutingEventHandlerDelegate> newValues;
2593        synchronized (mNewRoutingChangeListeners) {
2594            newValues = mNewRoutingChangeListeners.values();
2595        }
2596        for(NativeNewRoutingEventHandlerDelegate delegate : newValues) {
2597            Handler handler = delegate.getHandler();
2598            if (handler != null) {
2599                handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE);
2600            }
2601        }
2602    }
2603
2604    //---------------------------------------------------------
2605    // Interface definitions
2606    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         * @param track the AudioTrack whose playback head reached the marker
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         * @param track the AudioTrack whose playback head crossed a period boundary
         */
        void onPeriodicNotification(AudioTrack track);
    }
2624
2625    //---------------------------------------------------------
2626    // Inner classes
2627    //--------------------
2628    /**
2629     * Helper class to handle the forwarding of native events to the appropriate listener
2630     * (potentially) handled in a different thread
2631     */
2632    private class NativePositionEventHandlerDelegate {
2633        private final Handler mHandler;
2634
2635        NativePositionEventHandlerDelegate(final AudioTrack track,
2636                                   final OnPlaybackPositionUpdateListener listener,
2637                                   Handler handler) {
2638            // find the looper for our new event handler
2639            Looper looper;
2640            if (handler != null) {
2641                looper = handler.getLooper();
2642            } else {
2643                // no given handler, use the looper the AudioTrack was created in
2644                looper = mInitializationLooper;
2645            }
2646
2647            // construct the event handler with this looper
2648            if (looper != null) {
2649                // implement the event handler delegate
2650                mHandler = new Handler(looper) {
2651                    @Override
2652                    public void handleMessage(Message msg) {
2653                        if (track == null) {
2654                            return;
2655                        }
2656                        switch(msg.what) {
2657                        case NATIVE_EVENT_MARKER:
2658                            if (listener != null) {
2659                                listener.onMarkerReached(track);
2660                            }
2661                            break;
2662                        case NATIVE_EVENT_NEW_POS:
2663                            if (listener != null) {
2664                                listener.onPeriodicNotification(track);
2665                            }
2666                            break;
2667                        default:
2668                            loge("Unknown native event type: " + msg.what);
2669                            break;
2670                        }
2671                    }
2672                };
2673            } else {
2674                mHandler = null;
2675            }
2676        }
2677
2678        Handler getHandler() {
2679            return mHandler;
2680        }
2681    }
2682
2683    /**
2684     * Marshmallow Routing API.
2685     * Helper class to handle the forwarding of native events to the appropriate listener
2686     * (potentially) handled in a different thread
2687     */
2688    private class NativeRoutingEventHandlerDelegate {
2689        private final Handler mHandler;
2690
2691        NativeRoutingEventHandlerDelegate(final AudioTrack track,
2692                                   final OnRoutingChangedListener listener,
2693                                   Handler handler) {
2694            // find the looper for our new event handler
2695            Looper looper;
2696            if (handler != null) {
2697                looper = handler.getLooper();
2698            } else {
2699                // no given handler, use the looper the AudioTrack was created in
2700                looper = mInitializationLooper;
2701            }
2702
2703            // construct the event handler with this looper
2704            if (looper != null) {
2705                // implement the event handler delegate
2706                mHandler = new Handler(looper) {
2707                    @Override
2708                    public void handleMessage(Message msg) {
2709                        if (track == null) {
2710                            return;
2711                        }
2712                        switch(msg.what) {
2713                        case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE:
2714                            if (listener != null) {
2715                                listener.onRoutingChanged(track);
2716                            }
2717                            break;
2718                        default:
2719                            loge("Unknown native event type: " + msg.what);
2720                            break;
2721                        }
2722                    }
2723                };
2724            } else {
2725                mHandler = null;
2726            }
2727        }
2728
2729        Handler getHandler() {
2730            return mHandler;
2731        }
2732    }
2733
2734    /**
2735     * Marshmallow Routing API.
2736     * Helper class to handle the forwarding of native events to the appropriate listener
2737     * (potentially) handled in a different thread
2738     */
2739    private class NativeNewRoutingEventHandlerDelegate {
2740        private final Handler mHandler;
2741
2742        NativeNewRoutingEventHandlerDelegate(final AudioTrack track,
2743                                   final AudioRouting.OnRoutingChangedListener listener,
2744                                   Handler handler) {
2745            // find the looper for our new event handler
2746            Looper looper;
2747            if (handler != null) {
2748                looper = handler.getLooper();
2749            } else {
2750                // no given handler, use the looper the AudioTrack was created in
2751                looper = mInitializationLooper;
2752            }
2753
2754            // construct the event handler with this looper
2755            if (looper != null) {
2756                // implement the event handler delegate
2757                mHandler = new Handler(looper) {
2758                    @Override
2759                    public void handleMessage(Message msg) {
2760                        if (track == null) {
2761                            return;
2762                        }
2763                        switch(msg.what) {
2764                        case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE:
2765                            if (listener != null) {
2766                                listener.onRoutingChanged(track);
2767                            }
2768                            break;
2769                        default:
2770                            loge("Unknown native event type: " + msg.what);
2771                            break;
2772                        }
2773                    }
2774                };
2775            } else {
2776                mHandler = null;
2777            }
2778        }
2779
2780        Handler getHandler() {
2781            return mHandler;
2782        }
2783    }
2784
2785    //---------------------------------------------------------
2786    // Java methods called from the native side
2787    //--------------------
2788    @SuppressWarnings("unused")
2789    private static void postEventFromNative(Object audiotrack_ref,
2790            int what, int arg1, int arg2, Object obj) {
2791        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
2792        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
2793        if (track == null) {
2794            return;
2795        }
2796
2797        if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
2798            track.broadcastRoutingChange();
2799            return;
2800        }
2801        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
2802        if (delegate != null) {
2803            Handler handler = delegate.getHandler();
2804            if (handler != null) {
2805                Message m = handler.obtainMessage(what, arg1, arg2, obj);
2806                handler.sendMessage(m);
2807            }
2808        }
2809    }
2810
2811
    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    // --- Lifecycle ---

    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack);

    private native final void native_finalize();

    /**
     * @hide
     */
    public native final void native_release();

    // --- Transport control ---

    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // --- Audio data writes (one variant per sample container type) ---

    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    // --- Buffer geometry ---

    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    // --- Volume and playback parameters ---

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // --- Playback position, markers, and timing ---

    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // --- Static queries (no track instance required) ---

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // --- Auxiliary effects and routing ---

    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();
    static private native int native_get_FCC_8();
2900
2901    //---------------------------------------------------------
2902    // Utility methods
2903    //------------------
2904
    /** Logs a debug-level message under the class tag. */
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
2908
    /** Logs an error-level message under the class tag. */
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
2912}
2913