AudioTrack.java revision a713814f17ccbf5bb63d1fbb75bc5cda7a0cc641
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.media;
18
19import java.lang.annotation.Retention;
20import java.lang.annotation.RetentionPolicy;
21import java.lang.ref.WeakReference;
22import java.nio.ByteBuffer;
23import java.nio.NioUtils;
24
25import android.annotation.IntDef;
26import android.app.ActivityThread;
27import android.app.AppOpsManager;
28import android.content.Context;
29import android.os.Handler;
30import android.os.IBinder;
31import android.os.Looper;
32import android.os.Message;
33import android.os.Process;
34import android.os.RemoteException;
35import android.os.ServiceManager;
36import android.util.Log;
37
38import com.android.internal.app.IAppOpsService;
39
40
41/**
42 * The AudioTrack class manages and plays a single audio resource for Java applications.
43 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
44 * achieved by "pushing" the data to the AudioTrack object using one of the
45 * {@link #write(byte[], int, int)} or {@link #write(short[], int, int)} methods.
46 *
47 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
48 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
49 * one of the {@code write()} methods. These are blocking and return when the data has been
50 * transferred from the Java layer to the native layer and queued for playback. The streaming
51 * mode is most useful when playing blocks of audio data that for instance are:
52 *
53 * <ul>
54 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
55 *   <li>too big to fit in memory because of the characteristics of the audio data
56 *         (high sampling rate, bits per sample ...)</li>
57 *   <li>received or generated while previously queued audio is playing.</li>
58 * </ul>
59 *
60 * The static mode should be chosen when dealing with short sounds that fit in memory and
61 * that need to be played with the smallest latency possible. The static mode will
62 * therefore be preferred for UI and game sounds that are played often, and with the
63 * smallest overhead possible.
64 *
65 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
66 * The size of this buffer, specified during the construction, determines how long an AudioTrack
67 * can play before running out of data.<br>
68 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
69 * be played from it.<br>
70 * For the streaming mode, data will be written to the audio sink in chunks of
71 * sizes less than or equal to the total buffer size.
72 *
73 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
74 */
75public class AudioTrack
76{
77    //---------------------------------------------------------
78    // Constants
79    //--------------------
80    /** Minimum value for a linear gain or auxiliary effect level.
81     *  This value must be exactly equal to 0.0f; do not change it.
82     */
83    private static final float GAIN_MIN = 0.0f;
84    /** Maximum value for a linear gain or auxiliary effect level.
85     *  This value must be greater than or equal to 1.0f.
86     */
87    private static final float GAIN_MAX = 1.0f;
88
89    /** Minimum value for sample rate */
90    private static final int SAMPLE_RATE_HZ_MIN = 4000;
91    /** Maximum value for sample rate */
92    private static final int SAMPLE_RATE_HZ_MAX = 48000;
93
94    /** indicates AudioTrack state is stopped */
95    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
96    /** indicates AudioTrack state is paused */
97    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
98    /** indicates AudioTrack state is playing */
99    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
100
101    // keep these values in sync with android_media_AudioTrack.cpp
102    /**
103     * Creation mode where audio data is transferred from Java to the native layer
104     * only once before the audio starts playing.
105     */
106    public static final int MODE_STATIC = 0;
107    /**
108     * Creation mode where audio data is streamed from Java to the native layer
109     * as the audio is playing.
110     */
111    public static final int MODE_STREAM = 1;
112
113    /**
114     * State of an AudioTrack that was not successfully initialized upon creation.
115     */
116    public static final int STATE_UNINITIALIZED = 0;
117    /**
118     * State of an AudioTrack that is ready to be used.
119     */
120    public static final int STATE_INITIALIZED   = 1;
121    /**
122     * State of a successfully initialized AudioTrack that uses static data,
123     * but that hasn't received that data yet.
124     */
125    public static final int STATE_NO_STATIC_DATA = 2;
126
127    // Error codes:
128    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
129    /**
130     * Denotes a successful operation.
131     */
132    public  static final int SUCCESS                               = 0;
133    /**
134     * Denotes a generic operation failure.
135     */
136    public  static final int ERROR                                 = -1;
137    /**
138     * Denotes a failure due to the use of an invalid value.
139     */
140    public  static final int ERROR_BAD_VALUE                       = -2;
141    /**
142     * Denotes a failure due to the improper use of a method.
143     */
144    public  static final int ERROR_INVALID_OPERATION               = -3;
145
146    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
147    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
148    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
149    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
150    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;
151
152    // Events:
153    // to keep in sync with frameworks/av/include/media/AudioTrack.h
154    /**
155     * Event id for when the playback head has reached a previously set marker.
156     */
157    private static final int NATIVE_EVENT_MARKER  = 3;
158    /**
159     * Event id for when a previously set update period has elapsed during playback.
160     */
161    private static final int NATIVE_EVENT_NEW_POS = 4;
162
163    private final static String TAG = "android.media.AudioTrack";
164
165
166    /** @hide */
167    @IntDef({
168        WRITE_BLOCKING,
169        WRITE_NON_BLOCKING
170    })
171    @Retention(RetentionPolicy.SOURCE)
172    public @interface WriteMode {}
173
174    /**
175     * The write mode indicating the write operation will block until all data has been written,
176     * to be used in {@link #write(ByteBuffer, int, int, int)}.
177     */
178    public final static int WRITE_BLOCKING = 0;
179    /**
180     * The write mode indicating the write operation will return immediately after
181     * queuing as much audio data for playback as possible without blocking, to be used in
182     * {@link #write(ByteBuffer, int, int, int)}.
183     */
184    public final static int WRITE_NON_BLOCKING = 1;
185
186    //--------------------------------------------------------------------------
187    // Member variables
188    //--------------------
189    /**
190     * Indicates the state of the AudioTrack instance.
191     */
192    private int mState = STATE_UNINITIALIZED;
193    /**
194     * Indicates the play state of the AudioTrack instance.
195     */
196    private int mPlayState = PLAYSTATE_STOPPED;
197    /**
198     * Lock to ensure that mPlayState updates reflect the actual state of the object.
199     */
200    private final Object mPlayStateLock = new Object();
201    /**
202     * Sizes of the native audio buffer.
203     */
204    private int mNativeBufferSizeInBytes = 0;
205    private int mNativeBufferSizeInFrames = 0;
206    /**
207     * Handler for events coming from the native code.
208     */
209    private NativeEventHandlerDelegate mEventHandlerDelegate;
210    /**
211     * Looper associated with the thread that creates the AudioTrack instance.
212     */
213    private final Looper mInitializationLooper;
214    /**
215     * The audio data source sampling rate in Hz.
216     */
217    private int mSampleRate; // initialized by all constructors
218    /**
219     * The number of audio output channels (1 is mono, 2 is stereo).
220     */
221    private int mChannelCount = 1;
222    /**
223     * The audio channel mask.
224     */
225    private int mChannels = AudioFormat.CHANNEL_OUT_MONO;
226
227    /**
228     * The type of the audio stream to play. See
229     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
230     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
231     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
232     *   {@link AudioManager#STREAM_DTMF}.
233     */
234    private int mStreamType = AudioManager.STREAM_MUSIC;
235    /**
236     * The way audio is consumed by the audio sink, streaming or static.
237     */
238    private int mDataLoadMode = MODE_STREAM;
239    /**
240     * The current audio channel configuration.
241     */
242    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
243    /**
244     * The encoding of the audio samples.
245     * @see AudioFormat#ENCODING_PCM_8BIT
246     * @see AudioFormat#ENCODING_PCM_16BIT
247     */
248    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
249    /**
250     * Audio session ID
251     */
252    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
253    /**
254     * Reference to the app-ops service.
255     */
256    private final IAppOpsService mAppOps;
257
258    //--------------------------------
259    // Used exclusively by native code
260    //--------------------
261    /**
262     * Accessed by native methods: provides access to C++ AudioTrack object.
263     */
264    @SuppressWarnings("unused")
265    private long mNativeTrackInJavaObj;
266    /**
267     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
268     * the native AudioTrack object, but not stored in it).
269     */
270    @SuppressWarnings("unused")
271    private long mJniData;
272
273
274    //--------------------------------------------------------------------------
275    // Constructor, Finalize
276    //--------------------
277    /**
278     * Class constructor.
279     * @param streamType the type of the audio stream. See
280     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
281     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
282     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
283     * @param sampleRateInHz the initial source sample rate expressed in Hz.
284     * @param channelConfig describes the configuration of the audio channels.
285     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
286     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
287     * @param audioFormat the format in which the audio data is represented.
288     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
289     *   {@link AudioFormat#ENCODING_PCM_8BIT}
290     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
291     *   read from for playback.
292     *   If track's creation mode is {@link #MODE_STREAM}, you can write data into
293     *   this buffer in chunks less than or equal to this size, and it is typical to use
294     *   chunks of 1/2 of the total size to permit double-buffering.
295     *   If the track's creation mode is {@link #MODE_STATIC},
296     *   this is the maximum length sample, or audio clip, that can be played by this instance.
297     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
298     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
299     *   smaller than getMinBufferSize() will result in an initialization failure.
300     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
301     * @throws java.lang.IllegalArgumentException
302     */
303    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
304            int bufferSizeInBytes, int mode)
305    throws IllegalArgumentException {
306        this(streamType, sampleRateInHz, channelConfig, audioFormat,
307                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
308    }
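    // A minimal streaming-mode construction sketch based on the documentation above (the
    // 44.1 kHz mono 16-bit configuration, the doubling of the minimum buffer size, and the
    // caller-supplied pcm[] array are illustrative assumptions, not requirements):
    //
    //     int sampleRate = 44100;
    //     int minBuf = AudioTrack.getMinBufferSize(sampleRate,
    //             AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    //     AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
    //             AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
    //             minBuf * 2, AudioTrack.MODE_STREAM);
    //     track.play();
    //     track.write(pcm, 0, pcm.length);   // pcm: a caller-supplied short[] of samples
    //     track.stop();
    //     track.release();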
309
310    /**
311     * Class constructor with audio session. Use this constructor when the AudioTrack must be
312     * attached to a particular audio session. The primary use of the audio session ID is to
313     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
314     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
315     * and media players in the same session and not to the output mix.
316     * When an AudioTrack is created without specifying a session, it will create its own session
317     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
318     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
319     * session
320     * with all other media players or audio tracks in the same session, otherwise a new session
321     * will be created for this track if none is supplied.
322     * @param streamType the type of the audio stream. See
323     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
324     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
325     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
326     * @param sampleRateInHz the initial source sample rate expressed in Hz.
327     * @param channelConfig describes the configuration of the audio channels.
328     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
329     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
330     * @param audioFormat the format in which the audio data is represented.
331     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
332     *   {@link AudioFormat#ENCODING_PCM_8BIT}
333     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
334     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
335     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
336     *   this is the maximum size of the sound that will be played for this instance.
337     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
338     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
339     *   smaller than getMinBufferSize() will result in an initialization failure.
340     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
341     * @param sessionId Id of audio session the AudioTrack must be attached to
342     * @throws java.lang.IllegalArgumentException
343     */
344    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
345            int bufferSizeInBytes, int mode, int sessionId)
346    throws IllegalArgumentException {
347        // mState already == STATE_UNINITIALIZED
348
349        // remember which looper is associated with the AudioTrack instantiation
350        Looper looper;
351        if ((looper = Looper.myLooper()) == null) {
352            looper = Looper.getMainLooper();
353        }
354        mInitializationLooper = looper;
355
356        audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);
357
358        audioBuffSizeCheck(bufferSizeInBytes);
359
360        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
361        mAppOps = IAppOpsService.Stub.asInterface(b);
362
363        if (sessionId < 0) {
364            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
365        }
366
367        int[] session = new int[1];
368        session[0] = sessionId;
369        // native initialization
370        int initResult = native_setup(new WeakReference<AudioTrack>(this),
371                mStreamType, mSampleRate, mChannels, mAudioFormat,
372                mNativeBufferSizeInBytes, mDataLoadMode, session);
373        if (initResult != SUCCESS) {
374            loge("Error code "+initResult+" when initializing AudioTrack.");
375            return; // with mState == STATE_UNINITIALIZED
376        }
377
378        mSessionId = session[0];
379
380        if (mDataLoadMode == MODE_STATIC) {
381            mState = STATE_NO_STATIC_DATA;
382        } else {
383            mState = STATE_INITIALIZED;
384        }
385    }
386
387    // mask of all the channels supported by this implementation
388    private static final int SUPPORTED_OUT_CHANNELS =
389            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
390            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
391            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
392            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
393            AudioFormat.CHANNEL_OUT_BACK_LEFT |
394            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
395            AudioFormat.CHANNEL_OUT_BACK_CENTER;
396
397    // Convenience method for the constructor's parameter checks.
398    // This is where the constructor's IllegalArgumentExceptions are thrown
399    // postconditions:
400    //    mStreamType is valid
401    //    mChannelCount is valid
402    //    mChannels is valid
403    //    mAudioFormat is valid
404    //    mSampleRate is valid
405    //    mDataLoadMode is valid
406    private void audioParamCheck(int streamType, int sampleRateInHz,
407                                 int channelConfig, int audioFormat, int mode) {
408
409        //--------------
410        // stream type
411        if( (streamType != AudioManager.STREAM_ALARM) && (streamType != AudioManager.STREAM_MUSIC)
412           && (streamType != AudioManager.STREAM_RING) && (streamType != AudioManager.STREAM_SYSTEM)
413           && (streamType != AudioManager.STREAM_VOICE_CALL)
414           && (streamType != AudioManager.STREAM_NOTIFICATION)
415           && (streamType != AudioManager.STREAM_BLUETOOTH_SCO)
416           && (streamType != AudioManager.STREAM_DTMF)) {
417            throw new IllegalArgumentException("Invalid stream type.");
418        }
419        mStreamType = streamType;
420
421        //--------------
422        // sample rate, note these values are subject to change
423        if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
424            throw new IllegalArgumentException(sampleRateInHz
425                    + " Hz is not a supported sample rate.");
426        }
427        mSampleRate = sampleRateInHz;
428
429        //--------------
430        // channel config
431        mChannelConfiguration = channelConfig;
432
433        switch (channelConfig) {
434        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
435        case AudioFormat.CHANNEL_OUT_MONO:
436        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
437            mChannelCount = 1;
438            mChannels = AudioFormat.CHANNEL_OUT_MONO;
439            break;
440        case AudioFormat.CHANNEL_OUT_STEREO:
441        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
442            mChannelCount = 2;
443            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
444            break;
445        default:
446            if (!isMultichannelConfigSupported(channelConfig)) {
447                // input channel configuration features unsupported channels
448                throw new IllegalArgumentException("Unsupported channel configuration.");
449            }
450            mChannels = channelConfig;
451            mChannelCount = Integer.bitCount(channelConfig);
452        }
453
454        //--------------
455        // audio format
456        switch (audioFormat) {
457        case AudioFormat.ENCODING_DEFAULT:
458            mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
459            break;
460        case AudioFormat.ENCODING_PCM_16BIT:
461        case AudioFormat.ENCODING_PCM_8BIT:
462            mAudioFormat = audioFormat;
463            break;
464        default:
465            throw new IllegalArgumentException("Unsupported sample encoding."
466                + " Should be ENCODING_PCM_8BIT or ENCODING_PCM_16BIT.");
467        }
468
469        //--------------
470        // audio load mode
471        if ( (mode != MODE_STREAM) && (mode != MODE_STATIC) ) {
472            throw new IllegalArgumentException("Invalid mode.");
473        }
474        mDataLoadMode = mode;
475    }
476
477    /**
478     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
479     * @param channelConfig the mask to validate
480     * @return false if the AudioTrack can't be used with such a mask
481     */
482    private static boolean isMultichannelConfigSupported(int channelConfig) {
483        // check for unsupported channels
484        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
485            loge("Channel configuration features unsupported channels");
486            return false;
487        }
488        // check for unsupported multichannel combinations:
489        // - FL/FR must be present
490        // - L/R channels must be paired (e.g. no single L channel)
491        final int frontPair =
492                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
493        if ((channelConfig & frontPair) != frontPair) {
494                loge("Front channels must be present in multichannel configurations");
495                return false;
496        }
497        final int backPair =
498                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
499        if ((channelConfig & backPair) != 0) {
500            if ((channelConfig & backPair) != backPair) {
501                loge("Rear channels can't be used independently");
502                return false;
503            }
504        }
505        return true;
506    }
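    // A sketch of the multichannel rules enforced above, using an assumed 5.1-style mask
    // built from the individual channel constants (the mask passes because the front pair is
    // present and the back channels are paired):
    //
    //     int mask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
    //             | AudioFormat.CHANNEL_OUT_FRONT_RIGHT
    //             | AudioFormat.CHANNEL_OUT_FRONT_CENTER
    //             | AudioFormat.CHANNEL_OUT_LOW_FREQUENCY
    //             | AudioFormat.CHANNEL_OUT_BACK_LEFT
    //             | AudioFormat.CHANNEL_OUT_BACK_RIGHT;   // accepted
    //
    //     // Dropping only CHANNEL_OUT_BACK_RIGHT from this mask would be rejected, because
    //     // rear channels cannot be used independently.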
507
508
509    // Convenience method for the constructor's audio buffer size check.
510    // preconditions:
511    //    mChannelCount is valid
512    //    mAudioFormat is valid
513    // postcondition:
514    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
515    private void audioBuffSizeCheck(int audioBufferSize) {
516        // NB: this section is only valid with PCM data.
517        //     To update when supporting compressed formats
518        int frameSizeInBytes = mChannelCount
519                * (AudioFormat.getBytesPerSample(mAudioFormat));
520        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
521            throw new IllegalArgumentException("Invalid audio buffer size.");
522        }
523
524        mNativeBufferSizeInBytes = audioBufferSize;
525        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
526    }
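    // A worked example of the frame-size arithmetic above, assuming a stereo,
    // ENCODING_PCM_16BIT track with an 8192-byte buffer:
    //
    //     frameSizeInBytes          = 2 channels * 2 bytes per sample = 4 bytes
    //     mNativeBufferSizeInFrames = 8192 / 4                        = 2048 frames
    //
    // An 8190-byte buffer would be rejected, since it is not a multiple of the frame size.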
527
528
529    /**
530     * Releases the native AudioTrack resources.
531     */
532    public void release() {
533        // even though native_release() stops the native AudioTrack, we need to stop
534        // AudioTrack subclasses too.
535        try {
536            stop();
537        } catch(IllegalStateException ise) {
538            // don't raise an exception, we're releasing the resources.
539        }
540        native_release();
541        mState = STATE_UNINITIALIZED;
542    }
543
544    @Override
545    protected void finalize() {
546        native_finalize();
547    }
548
549    //--------------------------------------------------------------------------
550    // Getters
551    //--------------------
552    /**
553     * Returns the minimum gain value, which is the constant 0.0.
554     * Gain values less than 0.0 will be clamped to 0.0.
555     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
556     * @return the minimum value, which is the constant 0.0.
557     */
558    static public float getMinVolume() {
559        return GAIN_MIN;
560    }
561
562    /**
563     * Returns the maximum gain value, which is greater than or equal to 1.0.
564     * Gain values greater than the maximum will be clamped to the maximum.
565     * <p>The word "volume" in the API name is historical; this is actually a gain
566     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
567     * corresponds to a gain of 0 dB (sample values left unmodified).
568     * @return the maximum value, which is greater than or equal to 1.0.
569     */
570    static public float getMaxVolume() {
571        return GAIN_MAX;
572    }
573
574    /**
575     * Returns the configured audio data sample rate in Hz
576     */
577    public int getSampleRate() {
578        return mSampleRate;
579    }
580
581    /**
582     * Returns the current playback rate in Hz.
583     */
584    public int getPlaybackRate() {
585        return native_get_playback_rate();
586    }
587
588    /**
589     * Returns the configured audio data format. See {@link AudioFormat#ENCODING_PCM_16BIT}
590     * and {@link AudioFormat#ENCODING_PCM_8BIT}.
591     */
592    public int getAudioFormat() {
593        return mAudioFormat;
594    }
595
596    /**
597     * Returns the type of audio stream this AudioTrack is configured for.
598     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
599     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
600     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
601     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
602     */
603    public int getStreamType() {
604        return mStreamType;
605    }
606
607    /**
608     * Returns the configured channel configuration.
609     * See {@link AudioFormat#CHANNEL_OUT_MONO}
610     * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
611     */
612    public int getChannelConfiguration() {
613        return mChannelConfiguration;
614    }
615
616    /**
617     * Returns the configured number of channels.
618     */
619    public int getChannelCount() {
620        return mChannelCount;
621    }
622
623    /**
624     * Returns the state of the AudioTrack instance. This is useful after the
625     * AudioTrack instance has been created to check if it was initialized
626     * properly. This ensures that the appropriate resources have been acquired.
627     * @see #STATE_INITIALIZED
628     * @see #STATE_NO_STATIC_DATA
629     * @see #STATE_UNINITIALIZED
630     */
631    public int getState() {
632        return mState;
633    }
634
635    /**
636     * Returns the playback state of the AudioTrack instance.
637     * @see #PLAYSTATE_STOPPED
638     * @see #PLAYSTATE_PAUSED
639     * @see #PLAYSTATE_PLAYING
640     */
641    public int getPlayState() {
642        synchronized (mPlayStateLock) {
643            return mPlayState;
644        }
645    }
646
647    /**
648     *  Returns the "native frame count", derived from the bufferSizeInBytes specified at
649     *  creation time and converted to frame units.
650     *  If track's creation mode is {@link #MODE_STATIC},
651     *  it is equal to the specified bufferSizeInBytes converted to frame units.
652     *  If track's creation mode is {@link #MODE_STREAM},
653     *  it is typically greater than or equal to the specified bufferSizeInBytes converted to frame
654     *  units; it may be rounded up to a larger value if needed by the target device implementation.
655     *  @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
656     *  See {@link AudioManager#getProperty(String)} for key
657     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
658     */
659    @Deprecated
660    protected int getNativeFrameCount() {
661        return native_get_native_frame_count();
662    }
663
664    /**
665     * Returns marker position expressed in frames.
666     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
667     * or zero if marker is disabled.
668     */
669    public int getNotificationMarkerPosition() {
670        return native_get_marker_pos();
671    }
672
673    /**
674     * Returns the notification update period expressed in frames.
675     * Zero means that no position update notifications are being delivered.
676     */
677    public int getPositionNotificationPeriod() {
678        return native_get_pos_update_period();
679    }
680
681    /**
682     * Returns the playback head position expressed in frames.
683     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
684     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
685     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
686     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
687     * It is reset to zero by flush(), reload(), and stop().
688     */
689    public int getPlaybackHeadPosition() {
690        return native_get_position();
691    }
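    // A sketch of wrap-safe handling of getPlaybackHeadPosition(), per the documentation
    // above (the variable names are illustrative; the mask and the int subtraction are the
    // usual unsigned-32-bit idioms):
    //
    //     long positionFrames = track.getPlaybackHeadPosition() & 0xFFFFFFFFL; // 0..2^32-1
    //     int  elapsedFrames  = laterPosition - earlierPosition;   // remains valid across a wrap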
692
693    /**
694     * Returns this track's estimated latency in milliseconds. This includes the latency due
695     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
696     *
697     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
698     * a better solution.
699     * @hide
700     */
701    public int getLatency() {
702        return native_get_latency();
703    }
704
705    /**
706     *  Returns the output sample rate in Hz for the specified stream type.
707     */
708    static public int getNativeOutputSampleRate(int streamType) {
709        return native_get_output_sample_rate(streamType);
710    }
711
712    /**
713     * Returns the minimum buffer size required for the successful creation of an AudioTrack
714     * object created in {@link #MODE_STREAM} mode. Note that this size doesn't
715     * guarantee smooth playback under load, and higher values should be chosen according to
716     * the expected frequency at which the buffer will be refilled with additional data to play.
717     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
718     * to a higher value than the initial source sample rate, be sure to configure the buffer size
719     * based on the highest planned sample rate.
720     * @param sampleRateInHz the source sample rate expressed in Hz.
721     * @param channelConfig describes the configuration of the audio channels.
722     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
723     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
724     * @param audioFormat the format in which the audio data is represented.
725     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
726     *   {@link AudioFormat#ENCODING_PCM_8BIT}
727     * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
728     *   or {@link #ERROR} if unable to query for output properties,
729     *   or the minimum buffer size expressed in bytes.
730     */
731    static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
732        int channelCount = 0;
733        switch(channelConfig) {
734        case AudioFormat.CHANNEL_OUT_MONO:
735        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
736            channelCount = 1;
737            break;
738        case AudioFormat.CHANNEL_OUT_STEREO:
739        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
740            channelCount = 2;
741            break;
742        default:
743            if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
744                // input channel configuration features unsupported channels
745                loge("getMinBufferSize(): Invalid channel configuration.");
746                return ERROR_BAD_VALUE;
747            } else {
748                channelCount = Integer.bitCount(channelConfig);
749            }
750        }
751
752        if ((audioFormat != AudioFormat.ENCODING_PCM_16BIT)
753            && (audioFormat != AudioFormat.ENCODING_PCM_8BIT)) {
754            loge("getMinBufferSize(): Invalid audio format.");
755            return ERROR_BAD_VALUE;
756        }
757
758        // sample rate, note these values are subject to change
759        if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
760            loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
761            return ERROR_BAD_VALUE;
762        }
763
764        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
765        if (size <= 0) {
766            loge("getMinBufferSize(): error querying hardware");
767            return ERROR;
768        }
769        else {
770            return size;
771        }
772    }
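    // Typical use of getMinBufferSize(), as a sketch (checking for the error constants before
    // construction and doubling the minimum size for headroom are illustrative choices):
    //
    //     int minBuf = AudioTrack.getMinBufferSize(44100,
    //             AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    //     if (minBuf == AudioTrack.ERROR || minBuf == AudioTrack.ERROR_BAD_VALUE) {
    //         // invalid parameters, or the output properties could not be queried
    //     } else {
    //         AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
    //                 AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
    //                 minBuf * 2, AudioTrack.MODE_STREAM);
    //     }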
773
774    /**
775     * Returns the audio session ID.
776     *
777     * @return the ID of the audio session this AudioTrack belongs to.
778     */
779    public int getAudioSessionId() {
780        return mSessionId;
781    }
782
783   /**
784    * Poll for a timestamp on demand.
785    *
786    * Use if you need to get the most recent timestamp outside of the event callback handler.
787    * Calling this method too often may be inefficient;
788    * if you need a high-resolution mapping between frame position and presentation time,
789    * consider implementing that at application level, based on low-resolution timestamps.
790    * The audio data at the returned position may either already have been
791    * presented, or may have not yet been presented but is committed to be presented.
792    * It is not possible to request the time corresponding to a particular position,
793    * or to request the (fractional) position corresponding to a particular time.
794    * If you need such features, consider implementing them at application level.
795    *
796    * @param timestamp a reference to a non-null AudioTimestamp instance allocated
797    *        and owned by caller.
798    * @return true if a timestamp is available, or false if no timestamp is available.
799    *         If a timestamp is available,
800    *         the AudioTimestamp instance is filled in with a position in frame units, together
801    *         with the estimated time when that frame was presented or is committed to
802    *         be presented.
803    *         In the case that no timestamp is available, any supplied instance is left unaltered.
804    */
805    public boolean getTimestamp(AudioTimestamp timestamp)
806    {
807        if (timestamp == null) {
808            throw new IllegalArgumentException();
809        }
810        // It's unfortunate, but we have to either create garbage every time or use synchronized
811        long[] longArray = new long[2];
812        int ret = native_get_timestamp(longArray);
813        if (ret != SUCCESS) {
814            return false;
815        }
816        timestamp.framePosition = longArray[0];
817        timestamp.nanoTime = longArray[1];
818        return true;
819    }
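    // A sketch of extrapolating the current position from a polled timestamp (the linear
    // extrapolation and the reuse of a single AudioTimestamp instance are illustrative; a
    // playback-rate change after the poll is not accounted for):
    //
    //     AudioTimestamp ts = new AudioTimestamp();        // allocate once, reuse per poll
    //     if (track.getTimestamp(ts)) {
    //         long elapsedNanos   = System.nanoTime() - ts.nanoTime;
    //         long estimatedFrame = ts.framePosition
    //                 + elapsedNanos * track.getSampleRate() / 1000000000L;
    //     }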
820
821
822    //--------------------------------------------------------------------------
823    // Initialization / configuration
824    //--------------------
825    /**
826     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
827     * for each periodic playback head position update.
828     * Notifications will be received in the same thread as the one in which the AudioTrack
829     * instance was created.
830     * @param listener
831     */
832    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
833        setPlaybackPositionUpdateListener(listener, null);
834    }
835
836    /**
837     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
838     * for each periodic playback head position update.
839     * Use this method to receive AudioTrack events in the Handler associated with a
840     * different thread from the one in which you created the AudioTrack instance.
841     * @param listener
842     * @param handler the Handler that will receive the event notification messages.
843     */
844    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
845                                                    Handler handler) {
846        if (listener != null) {
847            mEventHandlerDelegate = new NativeEventHandlerDelegate(this, listener, handler);
848        } else {
849            mEventHandlerDelegate = null;
850        }
851    }
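    // A sketch of a periodic position listener (the once-per-second period assumes a
    // 44.1 kHz track; both values are illustrative):
    //
    //     track.setPositionNotificationPeriod(44100);      // one notification per second of audio
    //     track.setPlaybackPositionUpdateListener(new AudioTrack.OnPlaybackPositionUpdateListener() {
    //         @Override public void onMarkerReached(AudioTrack t) { /* marker was reached */ }
    //         @Override public void onPeriodicNotification(AudioTrack t) { /* period elapsed */ }
    //     });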
852
853
854    private static float clampGainOrLevel(float gainOrLevel) {
855        if (Float.isNaN(gainOrLevel)) {
856            throw new IllegalArgumentException();
857        }
858        if (gainOrLevel < GAIN_MIN) {
859            gainOrLevel = GAIN_MIN;
860        } else if (gainOrLevel > GAIN_MAX) {
861            gainOrLevel = GAIN_MAX;
862        }
863        return gainOrLevel;
864    }
865
866
867    /**
868     * Sets the specified left and right output gain values on the AudioTrack.
869     * <p>Gain values are clamped to the closed interval [0.0, max] where
870     * max is the value of {@link #getMaxVolume}.
871     * A value of 0.0 results in zero gain (silence), and
872     * a value of 1.0 means unity gain (signal unchanged).
873     * The default value is 1.0 meaning unity gain.
874     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
875     * @param leftGain output gain for the left channel.
876     * @param rightGain output gain for the right channel
877     * @return error code or success, see {@link #SUCCESS},
878     *    {@link #ERROR_INVALID_OPERATION}
879     * @deprecated Applications should use {@link #setVolume} instead, as it
880     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
881     */
882    public int setStereoVolume(float leftGain, float rightGain) {
883        if (isRestricted()) {
884            return SUCCESS;
885        }
886        if (mState == STATE_UNINITIALIZED) {
887            return ERROR_INVALID_OPERATION;
888        }
889
890        leftGain = clampGainOrLevel(leftGain);
891        rightGain = clampGainOrLevel(rightGain);
892
893        native_setVolume(leftGain, rightGain);
894
895        return SUCCESS;
896    }
897
898
899    /**
900     * Sets the specified output gain value on all channels of this track.
901     * <p>Gain values are clamped to the closed interval [0.0, max] where
902     * max is the value of {@link #getMaxVolume}.
903     * A value of 0.0 results in zero gain (silence), and
904     * a value of 1.0 means unity gain (signal unchanged).
905     * The default value is 1.0 meaning unity gain.
906     * <p>This API is preferred over {@link #setStereoVolume}, as it
907     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
908     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
909     * @param gain output gain for all channels.
910     * @return error code or success, see {@link #SUCCESS},
911     *    {@link #ERROR_INVALID_OPERATION}
912     */
913    public int setVolume(float gain) {
914        return setStereoVolume(gain, gain);
915    }
916
917
918    /**
919     * Sets the playback sample rate for this track. This sets the sampling rate at which
920     * the audio data will be consumed and played back
921     * (as set by the sampleRateInHz parameter in the
922     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
923     * not the original sampling rate of the
924     * content. For example, setting it to half the sample rate of the content will cause the
925     * playback to last twice as long, but will also result in a pitch shift down by one octave.
926     * The valid sample rate range is from 1 Hz to twice the value returned by
927     * {@link #getNativeOutputSampleRate(int)}.
928     * @param sampleRateInHz the sample rate expressed in Hz
929     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
930     *    {@link #ERROR_INVALID_OPERATION}
931     */
932    public int setPlaybackRate(int sampleRateInHz) {
933        if (mState != STATE_INITIALIZED) {
934            return ERROR_INVALID_OPERATION;
935        }
936        if (sampleRateInHz <= 0) {
937            return ERROR_BAD_VALUE;
938        }
939        return native_set_playback_rate(sampleRateInHz);
940    }
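    // A sketch of the rate/duration/pitch relationship described above (values illustrative):
    //
    //     int contentRate = track.getSampleRate();
    //     track.setPlaybackRate(contentRate / 2);   // half speed, twice the duration, one octave down
    //     track.setPlaybackRate(contentRate);       // restore the original rate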
941
942
943    /**
944     * Sets the position of the notification marker.  At most one marker can be active.
945     * @param markerInFrames marker position in wrapping frame units similar to
946     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
947     * To set a marker at a position which would appear as zero due to wraparound,
948     * a workaround is to use a non-zero position near zero, such as -1 or 1.
949     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
950     *  {@link #ERROR_INVALID_OPERATION}
951     */
952    public int setNotificationMarkerPosition(int markerInFrames) {
953        if (mState == STATE_UNINITIALIZED) {
954            return ERROR_INVALID_OPERATION;
955        }
956        return native_set_marker_pos(markerInFrames);
957    }
958
959
960    /**
961     * Sets the period for the periodic notification event.
962     * @param periodInFrames update period expressed in frames
963     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
964     */
965    public int setPositionNotificationPeriod(int periodInFrames) {
966        if (mState == STATE_UNINITIALIZED) {
967            return ERROR_INVALID_OPERATION;
968        }
969        return native_set_pos_update_period(periodInFrames);
970    }
971
972
973    /**
974     * Sets the playback head position.
975     * The track must be stopped or paused for the position to be changed,
976     * and must use the {@link #MODE_STATIC} mode.
977     * @param positionInFrames playback head position expressed in frames
978     * Zero corresponds to start of buffer.
979     * The position must be non-negative and not greater than the buffer size in frames.
980     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
981     *    {@link #ERROR_INVALID_OPERATION}
982     */
983    public int setPlaybackHeadPosition(int positionInFrames) {
984        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
985                getPlayState() == PLAYSTATE_PLAYING) {
986            return ERROR_INVALID_OPERATION;
987        }
988        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
989            return ERROR_BAD_VALUE;
990        }
991        return native_set_position(positionInFrames);
992    }
993
994    /**
995     * Sets the loop points and the loop count. The loop can be infinite.
996     * Similarly to setPlaybackHeadPosition,
997     * the track must be stopped or paused for the loop points to be changed,
998     * and must use the {@link #MODE_STATIC} mode.
999     * @param startInFrames loop start marker expressed in frames
1000     * Zero corresponds to start of buffer.
1001     * The start marker must be non-negative and less than the buffer size in frames.
1002     * @param endInFrames loop end marker expressed in frames
1003     * The total buffer size in frames corresponds to end of buffer.
1004     * The end marker must not be greater than the buffer size in frames.
1005     * For looping, the end marker must be greater than the start marker,
1006     * but to disable looping
1007     * it is permitted for start marker, end marker, and loop count to all be 0.
1008     * @param loopCount the number of times the loop is looped.
1009     *    A value of -1 means infinite looping, and 0 disables looping.
1010     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1011     *    {@link #ERROR_INVALID_OPERATION}
1012     */
1013    public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1014        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1015                getPlayState() == PLAYSTATE_PLAYING) {
1016            return ERROR_INVALID_OPERATION;
1017        }
1018        if (loopCount == 0) {
1019            ;   // explicitly allowed as an exception to the loop region range check
1020        } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1021                startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1022            return ERROR_BAD_VALUE;
1023        }
1024        return native_set_loop(startInFrames, endInFrames, loopCount);
1025    }
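    // A static-mode looping sketch (clipPcm is an assumed caller-supplied short[] holding the
    // whole clip; the infinite loop count of -1 is just one option):
    //
    //     track.write(clipPcm, 0, clipPcm.length);  // supplies the static data
    //     int clipFrames = clipPcm.length / track.getChannelCount();
    //     track.setLoopPoints(0, clipFrames, -1);   // loop the entire clip indefinitely
    //     track.play();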
1026
1027    /**
1028     * Sets the initialization state of the instance. This method was originally intended to be used
1029     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
1030     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
1031     * @param state the state of the AudioTrack instance
1032     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
1033     */
1034    @Deprecated
1035    protected void setState(int state) {
1036        mState = state;
1037    }
1038
1039
1040    //---------------------------------------------------------
1041    // Transport control methods
1042    //--------------------
1043    /**
1044     * Starts playing an AudioTrack.
1045     * If the track's creation mode is {@link #MODE_STATIC}, you must have called write() beforehand.
1046     *
1047     * @throws IllegalStateException
1048     */
1049    public void play()
1050    throws IllegalStateException {
1051        if (mState != STATE_INITIALIZED) {
1052            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
1053        }
1054        if (isRestricted()) {
1055            setVolume(0);
1056        }
1057        synchronized(mPlayStateLock) {
1058            native_start();
1059            mPlayState = PLAYSTATE_PLAYING;
1060        }
1061    }
1062
1063    private boolean isRestricted() {
1064        try {
1065            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, mStreamType,
1066                    Process.myUid(), ActivityThread.currentPackageName());
1067            return mode != AppOpsManager.MODE_ALLOWED;
1068        } catch (RemoteException e) {
1069            return false;
1070        }
1071    }
1072
1073    /**
1074     * Stops playing the audio data.
1075     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
1076     * after the last buffer that was written has been played. For an immediate stop, use
1077     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
1078     * back yet.
1079     * @throws IllegalStateException
1080     */
1081    public void stop()
1082    throws IllegalStateException {
1083        if (mState != STATE_INITIALIZED) {
1084            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
1085        }
1086
1087        // stop playing
1088        synchronized(mPlayStateLock) {
1089            native_stop();
1090            mPlayState = PLAYSTATE_STOPPED;
1091        }
1092    }
1093
1094    /**
1095     * Pauses the playback of the audio data. Data that has not been played
1096     * back will not be discarded. Subsequent calls to {@link #play} will play
1097     * this data back. See {@link #flush()} to discard this data.
1098     *
1099     * @throws IllegalStateException
1100     */
1101    public void pause()
1102    throws IllegalStateException {
1103        if (mState != STATE_INITIALIZED) {
1104            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
1105        }
1106        //logd("pause()");
1107
1108        // pause playback
1109        synchronized(mPlayStateLock) {
1110            native_pause();
1111            mPlayState = PLAYSTATE_PAUSED;
1112        }
1113    }
1114
1115
1116    //---------------------------------------------------------
1117    // Audio data supply
1118    //--------------------
1119
1120    /**
1121     * Flushes the audio data currently queued for playback. Any data that has
1122     * not been played back will be discarded.  No-op if not stopped or paused,
1123     * or if the track's creation mode is not {@link #MODE_STREAM}.
1124     */
1125    public void flush() {
1126        if (mState == STATE_INITIALIZED) {
1127            // flush the data in native layer
1128            native_flush();
1129        }
1130
1131    }
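    // The "immediate stop" idiom for streaming mode mentioned in the stop() documentation,
    // as a two-line sketch: pause halts playback at once, flush discards the unplayed data.
    //
    //     track.pause();
    //     track.flush();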
1132
1133    /**
1134     * Writes the audio data to the audio sink for playback (streaming mode),
1135     * or copies audio data for later playback (static buffer mode).
1136     * In streaming mode, will block until all data has been written to the audio sink.
1137     * In static buffer mode, copies the data to the buffer starting at offset 0.
1138     * Note that the actual playback of this data might occur after this function
1139     * returns. This function is thread safe with respect to {@link #stop} calls,
1140     * in which case all of the specified data might not be written to the audio sink.
1141     *
1142     * @param audioData the array that holds the data to play.
1143     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
1144     *    starts.
1145     * @param sizeInBytes the number of bytes to read in audioData after the offset.
1146     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
1147     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1148     *    the parameters don't resolve to valid data and indexes.
1149     */
1150
1151    public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
1152
1153        if (mState == STATE_UNINITIALIZED) {
1154            return ERROR_INVALID_OPERATION;
1155        }
1156
1157        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
1158                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
1159                || (offsetInBytes + sizeInBytes > audioData.length)) {
1160            return ERROR_BAD_VALUE;
1161        }
1162
1163        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
1164                true /*isBlocking*/);
1165
1166        if ((mDataLoadMode == MODE_STATIC)
1167                && (mState == STATE_NO_STATIC_DATA)
1168                && (ret > 0)) {
1169            // benign race with respect to other APIs that read mState
1170            mState = STATE_INITIALIZED;
1171        }
1172
1173        return ret;
1174    }
1175
1176
1177    /**
1178     * Writes the audio data to the audio sink for playback (streaming mode),
1179     * or copies audio data for later playback (static buffer mode).
1180     * In streaming mode, will block until all data has been written to the audio sink.
1181     * In static buffer mode, copies the data to the buffer starting at offset 0.
1182     * Note that the actual playback of this data might occur after this function
1183     * returns. This function is thread safe with respect to {@link #stop} calls,
1184     * in which case all of the specified data might not be written to the audio sink.
1185     *
1186     * @param audioData the array that holds the data to play.
1187     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1188     *     starts.
1189     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1190     * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
1191     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1192     *    the parameters don't resolve to valid data and indexes.
1193     */
1194
1195    public int write(short[] audioData, int offsetInShorts, int sizeInShorts) {
1196
1197        if (mState == STATE_UNINITIALIZED) {
1198            return ERROR_INVALID_OPERATION;
1199        }
1200
1201        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
1202                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
1203                || (offsetInShorts + sizeInShorts > audioData.length)) {
1204            return ERROR_BAD_VALUE;
1205        }
1206
1207        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat);
1208
1209        if ((mDataLoadMode == MODE_STATIC)
1210                && (mState == STATE_NO_STATIC_DATA)
1211                && (ret > 0)) {
1212            // benign race with respect to other APIs that read mState
1213            mState = STATE_INITIALIZED;
1214        }
1215
1216        return ret;
1217    }
1218
1219
1220    /**
1221     * Writes the audio data to the audio sink for playback (streaming mode),
1222     * or copies audio data for later playback (static buffer mode).
1223     * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write
1224     * mode is ignored.
1225     * In streaming mode, the blocking behavior will depend on the write mode.
1226     * @param audioData the buffer that holds the data to play, starting at the position reported
1227     *     by <code>audioData.position()</code>.
1228     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
1229     *     have been advanced to reflect the amount of data that was successfully written to
1230     *     the AudioTrack.
1231     * @param sizeInBytes number of bytes to write.
1232     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
1233     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1234     *     effect in static mode.
1235     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1236     *         to the audio sink.
1237     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1238     *     queuing as much audio data for playback as possible without blocking.
1239     * @return 0 or a positive number of bytes that were written, or
1240     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1241     */
1242    public int write(ByteBuffer audioData, int sizeInBytes,
1243            @WriteMode int writeMode) {
1244
1245        if (mState == STATE_UNINITIALIZED) {
1246            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1247            return ERROR_INVALID_OPERATION;
1248        }
1249
1250        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1251            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1252            return ERROR_BAD_VALUE;
1253        }
1254
1255        if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
1256            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
1257            return ERROR_BAD_VALUE;
1258        }
1259
1260        int ret = 0;
1261        if (audioData.isDirect()) {
1262            ret = native_write_native_bytes(audioData,
1263                    audioData.position(), sizeInBytes, mAudioFormat,
1264                    writeMode == WRITE_BLOCKING);
1265        } else {
1266            ret = native_write_byte(NioUtils.unsafeArray(audioData),
1267                    NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
1268                    sizeInBytes, mAudioFormat,
1269                    writeMode == WRITE_BLOCKING);
1270        }
1271
1272        if ((mDataLoadMode == MODE_STATIC)
1273                && (mState == STATE_NO_STATIC_DATA)
1274                && (ret > 0)) {
1275            // benign race with respect to other APIs that read mState
1276            mState = STATE_INITIALIZED;
1277        }
1278
1279        if (ret > 0) {
1280            audioData.position(audioData.position() + ret);
1281        }
1282
1283        return ret;
1284    }
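
    // Editor's note: a minimal, hypothetical sketch (not part of the original source) of
    // draining a direct ByteBuffer with the non-blocking write above. It assumes "track" is
    // an initialized streaming AudioTrack using ENCODING_PCM_16BIT and that the caller has
    // filled and flipped "buf" before the loop.
    //
    //     ByteBuffer buf = ByteBuffer.allocateDirect(bufferSizeInBytes)
    //             .order(ByteOrder.nativeOrder());
    //     // ... fill buf with PCM data, then buf.flip() ...
    //     while (buf.remaining() > 0) {
    //         int written = track.write(buf, buf.remaining(), AudioTrack.WRITE_NON_BLOCKING);
    //         if (written < 0) {
    //             break;             // ERROR_BAD_VALUE or ERROR_INVALID_OPERATION
    //         }
    //         // write() has already advanced buf.position() by "written" bytes;
    //         // a return of 0 means the track could not accept more data right now.
    //     }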
1285
1286    /**
1287     * Notifies the native resource to reuse the audio data already loaded in the native
1288     * layer, that is, to rewind to the start of the buffer.
1289     * The track's creation mode must be {@link #MODE_STATIC}.
1290     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1291     *  {@link #ERROR_INVALID_OPERATION}
1292     */
1293    public int reloadStaticData() {
1294        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
1295            return ERROR_INVALID_OPERATION;
1296        }
1297        return native_reload_static();
1298    }
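
    // Editor's note: an illustrative sketch (not part of the original source) of replaying a
    // MODE_STATIC sound without writing its data again. The construction parameters and the
    // "soundData" array are assumptions for the example only.
    //
    //     AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
    //             AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
    //             soundData.length * 2, AudioTrack.MODE_STATIC);
    //     track.write(soundData, 0, soundData.length);  // short[] copied into the static buffer
    //     track.play();
    //     // ... later, after playback has finished ...
    //     track.stop();
    //     if (track.reloadStaticData() == AudioTrack.SUCCESS) {
    //         track.play();                              // replays the same buffer from the start
    //     }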
1299
1300    //--------------------------------------------------------------------------
1301    // Audio effects management
1302    //--------------------
1303
1304    /**
1305     * Attaches an auxiliary effect to the audio track. A typical auxiliary
1306     * effect is a reverberation effect which can be applied on any sound source
1307     * that directs a certain amount of its energy to this effect. This amount
1308     * is defined by {@link #setAuxEffectSendLevel(float)}.
1310     * <p>After creating an auxiliary effect (e.g.
1311     * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
1312     * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
1313     * this method to attach the audio track to the effect.
1314     * <p>To detach the effect from the audio track, call this method with an
1315     * effect id of 0.
1316     *
1317     * @param effectId system wide unique id of the effect to attach
1318     * @return error code or success, see {@link #SUCCESS},
1319     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
1320     */
1321    public int attachAuxEffect(int effectId) {
1322        if (mState == STATE_UNINITIALIZED) {
1323            return ERROR_INVALID_OPERATION;
1324        }
1325        return native_attachAuxEffect(effectId);
1326    }
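
    // Editor's note: a hypothetical usage sketch (not part of the original source) showing how
    // an auxiliary reverb could be attached to a track; the priority value 0 and the send level
    // of 1.0f are arbitrary choices for the example.
    //
    //     // Requires android.media.audiofx.EnvironmentalReverb; an auxiliary effect is
    //     // created on audio session 0 (the output mix).
    //     EnvironmentalReverb reverb = new EnvironmentalReverb(0 /* priority */, 0 /* session */);
    //     reverb.setEnabled(true);
    //     if (track.attachAuxEffect(reverb.getId()) == AudioTrack.SUCCESS) {
    //         track.setAuxEffectSendLevel(1.0f);   // full send; the default 0.0f keeps it silent
    //     }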
1327
1328    /**
1329     * Sets the send level of the audio track to the auxiliary effect attached with
1330     * {@link #attachAuxEffect(int)}.  Effect levels
1331     * are clamped to the closed interval [0.0, max] where
1332     * max is the value of {@link #getMaxVolume}.
1333     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
1334     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
1335     * this method must be called for the effect to be applied.
1336     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
1337     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
1338     * so an appropriate conversion from linear UI input x to level is:
1339     * x == 0 -&gt; level = 0
1340     * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
1341     *
1342     * @param level linear send level
1343     * @return error code or success, see {@link #SUCCESS},
1344     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
1345     */
1346    public int setAuxEffectSendLevel(float level) {
1347        if (isRestricted()) {
1348            return SUCCESS;
1349        }
1350        if (mState == STATE_UNINITIALIZED) {
1351            return ERROR_INVALID_OPERATION;
1352        }
1353        level = clampGainOrLevel(level);
1354        int err = native_setAuxEffectSendLevel(level);
1355        return err == 0 ? SUCCESS : ERROR;
1356    }
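
    // Editor's note: a small worked example (not part of the original source) of the
    // linear-UI-to-level conversion suggested in the javadoc above. "uiValue" and "uiMax"
    // (R in the formula) are hypothetical names for a slider position and its maximum.
    //
    //     float levelFromLinearUi(float uiValue, float uiMax) {
    //         if (uiValue <= 0.0f) {
    //             return 0.0f;
    //         }
    //         // level = 10^(72 * (x - R) / 20 / R); at x == R this is 1.0 (0 dB), and for
    //         // small x it approaches the -72 dB floor applied by the audio framework.
    //         return (float) Math.pow(10.0, 72.0 * (uiValue - uiMax) / 20.0 / uiMax);
    //     }
    //
    //     // e.g. with uiMax = 100: uiValue = 100 -> 1.0, uiValue = 50 -> 10^(-1.8) ≈ 0.016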
1357
1358    //---------------------------------------------------------
1359    // Interface definitions
1360    //--------------------
1361    /**
1362     * Interface definition for a callback to be invoked when the playback head position of
1363     * an AudioTrack has reached a notification marker or has increased by a certain period.
1364     */
1365    public interface OnPlaybackPositionUpdateListener  {
1366        /**
1367         * Called on the listener to notify it that the previously set marker has been reached
1368         * by the playback head.
1369         */
1370        void onMarkerReached(AudioTrack track);
1371
1372        /**
1373         * Called on the listener to periodically notify it that the playback head has reached
1374         * a multiple of the notification period.
1375         */
1376        void onPeriodicNotification(AudioTrack track);
1377    }
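
    // Editor's note: an illustrative sketch (not part of the original source) of registering
    // this listener on a streaming track; the marker and period values are arbitrary and are
    // expressed in frames.
    //
    //     track.setPlaybackPositionUpdateListener(new AudioTrack.OnPlaybackPositionUpdateListener() {
    //         @Override
    //         public void onMarkerReached(AudioTrack t) {
    //             // reached the frame set with setNotificationMarkerPosition()
    //         }
    //         @Override
    //         public void onPeriodicNotification(AudioTrack t) {
    //             // fires every setPositionNotificationPeriod() frames; a typical use is
    //             // refilling the buffer or updating a progress UI
    //         }
    //     });
    //     track.setNotificationMarkerPosition(44100);    // one second into playback at 44.1 kHz
    //     track.setPositionNotificationPeriod(4410);     // every 100 ms at 44.1 kHz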
1378
1379
1380    //---------------------------------------------------------
1381    // Inner classes
1382    //--------------------
1383    /**
1384     * Helper class to handle the forwarding of native events to the appropriate listener,
1385     * which may potentially be handled in a different thread.
1386     */
1387    private class NativeEventHandlerDelegate {
1388        private final Handler mHandler;
1389
1390        NativeEventHandlerDelegate(final AudioTrack track,
1391                                   final OnPlaybackPositionUpdateListener listener,
1392                                   Handler handler) {
1393            // find the looper for our new event handler
1394            Looper looper;
1395            if (handler != null) {
1396                looper = handler.getLooper();
1397            } else {
1398                // no given handler, use the looper the AudioTrack was created in
1399                looper = mInitializationLooper;
1400            }
1401
1402            // construct the event handler with this looper
1403            if (looper != null) {
1404                // implement the event handler delegate
1405                mHandler = new Handler(looper) {
1406                    @Override
1407                    public void handleMessage(Message msg) {
1408                        if (track == null) {
1409                            return;
1410                        }
1411                        switch(msg.what) {
1412                        case NATIVE_EVENT_MARKER:
1413                            if (listener != null) {
1414                                listener.onMarkerReached(track);
1415                            }
1416                            break;
1417                        case NATIVE_EVENT_NEW_POS:
1418                            if (listener != null) {
1419                                listener.onPeriodicNotification(track);
1420                            }
1421                            break;
1422                        default:
1423                            loge("Unknown native event type: " + msg.what);
1424                            break;
1425                        }
1426                    }
1427                };
1428            } else {
1429                mHandler = null;
1430            }
1431        }
1432
1433        Handler getHandler() {
1434            return mHandler;
1435        }
1436    }
1437
1438
1439    //---------------------------------------------------------
1440    // Java methods called from the native side
1441    //--------------------
1442    @SuppressWarnings("unused")
1443    private static void postEventFromNative(Object audiotrack_ref,
1444            int what, int arg1, int arg2, Object obj) {
1445        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
1446        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
1447        if (track == null) {
1448            return;
1449        }
1450
1451        NativeEventHandlerDelegate delegate = track.mEventHandlerDelegate;
1452        if (delegate != null) {
1453            Handler handler = delegate.getHandler();
1454            if (handler != null) {
1455                Message m = handler.obtainMessage(what, arg1, arg2, obj);
1456                handler.sendMessage(m);
1457            }
1458        }
1459
1460    }
1461
1462
1463    //---------------------------------------------------------
1464    // Native methods called from the Java side
1465    //--------------------
1466
1467    private native final int native_setup(Object audiotrack_this,
1468            int streamType, int sampleRate, int channelMask, int audioFormat,
1469            int buffSizeInBytes, int mode, int[] sessionId);
1470
1471    private native final void native_finalize();
1472
1473    private native final void native_release();
1474
1475    private native final void native_start();
1476
1477    private native final void native_stop();
1478
1479    private native final void native_pause();
1480
1481    private native final void native_flush();
1482
1483    private native final int native_write_byte(byte[] audioData,
1484                                               int offsetInBytes, int sizeInBytes, int format,
1485                                               boolean isBlocking);
1486
1487    private native final int native_write_short(short[] audioData,
1488                                                int offsetInShorts, int sizeInShorts, int format);
1489
1490    private native final int native_write_native_bytes(Object audioData,
1491            int positionInBytes, int sizeInBytes, int format, boolean blocking);
1492
1493    private native final int native_reload_static();
1494
1495    private native final int native_get_native_frame_count();
1496
1497    private native final void native_setVolume(float leftVolume, float rightVolume);
1498
1499    private native final int native_set_playback_rate(int sampleRateInHz);
1500    private native final int native_get_playback_rate();
1501
1502    private native final int native_set_marker_pos(int marker);
1503    private native final int native_get_marker_pos();
1504
1505    private native final int native_set_pos_update_period(int updatePeriod);
1506    private native final int native_get_pos_update_period();
1507
1508    private native final int native_set_position(int position);
1509    private native final int native_get_position();
1510
1511    private native final int native_get_latency();
1512
1513    // longArray must be a non-null array of length >= 2
1514    // [0] is assigned the frame position
1515    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
1516    private native final int native_get_timestamp(long[] longArray);
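
    // Editor's note (assumption, not part of the original source): a caller of this native
    // method would typically unpack the two slots into an AudioTimestamp, roughly as follows.
    //
    //     long[] longArray = new long[2];
    //     if (native_get_timestamp(longArray) == SUCCESS) {
    //         timestamp.framePosition = longArray[0];    // frames presented so far
    //         timestamp.nanoTime = longArray[1];         // CLOCK_MONOTONIC time of that position
    //     }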
1517
1518    private native final int native_set_loop(int start, int end, int loopCount);
1519
1520    private static native final int native_get_output_sample_rate(int streamType);
1521    private static native final int native_get_min_buff_size(
1522            int sampleRateInHz, int channelConfig, int audioFormat);
1523
1524    private native final int native_attachAuxEffect(int effectId);
1525    private native final int native_setAuxEffectSendLevel(float level);
1526
1527    //---------------------------------------------------------
1528    // Utility methods
1529    //------------------
1530
1531    private static void logd(String msg) {
1532        Log.d(TAG, msg);
1533    }
1534
1535    private static void loge(String msg) {
1536        Log.e(TAG, msg);
1537    }
1538
1539}
1540