AudioTrack.java revision 7d60bcd2d950e3571c00ce9f1c492c6bd58334c9
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.media;
18
19import java.lang.annotation.Retention;
20import java.lang.annotation.RetentionPolicy;
21import java.lang.ref.WeakReference;
22import java.nio.ByteBuffer;
23import java.nio.NioUtils;
24
25import android.annotation.IntDef;
26import android.app.ActivityThread;
27import android.app.AppOpsManager;
28import android.content.Context;
29import android.os.Handler;
30import android.os.IBinder;
31import android.os.Looper;
32import android.os.Message;
33import android.os.Process;
34import android.os.RemoteException;
35import android.os.ServiceManager;
36import android.util.Log;
37
38import com.android.internal.app.IAppOpsService;
39
40
41/**
42 * The AudioTrack class manages and plays a single audio resource for Java applications.
43 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
44 * achieved by "pushing" the data to the AudioTrack object using one of the
45 *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
46 *  and {@link #write(float[], int, int, int)} methods.
47 *
48 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
49 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
50 * one of the {@code write()} methods. These are blocking and return when the data has been
51 * transferred from the Java layer to the native layer and queued for playback. The streaming
52 * mode is most useful when playing blocks of audio data that for instance are:
53 *
54 * <ul>
55 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
56 *   <li>too big to fit in memory because of the characteristics of the audio data
57 *         (high sampling rate, bits per sample ...)</li>
58 *   <li>received or generated while previously queued audio is playing.</li>
59 * </ul>
60 *
61 * The static mode should be chosen when dealing with short sounds that fit in memory and
62 * that need to be played with the smallest latency possible. The static mode will
63 * therefore be preferred for UI and game sounds that are played often, and with the
64 * smallest overhead possible.
65 *
66 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
67 * The size of this buffer, specified during the construction, determines how long an AudioTrack
68 * can play before running out of data.<br>
69 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
70 * be played from it.<br>
71 * For the streaming mode, data will be written to the audio sink in chunks of
72 * sizes less than or equal to the total buffer size.
73 *
74 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
75 */
76public class AudioTrack
77{
78    //---------------------------------------------------------
79    // Constants
80    //--------------------
81    /** Minimum value for a linear gain or auxiliary effect level.
82     *  This value must be exactly equal to 0.0f; do not change it.
83     */
84    private static final float GAIN_MIN = 0.0f;
85    /** Maximum value for a linear gain or auxiliary effect level.
86     *  This value must be greater than or equal to 1.0f.
87     */
88    private static final float GAIN_MAX = 1.0f;
89
90    /** Minimum value for sample rate */
91    private static final int SAMPLE_RATE_HZ_MIN = 4000;
92    /** Maximum value for sample rate */
93    private static final int SAMPLE_RATE_HZ_MAX = 48000;
94
95    /** indicates AudioTrack state is stopped */
96    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
97    /** indicates AudioTrack state is paused */
98    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
99    /** indicates AudioTrack state is playing */
100    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
101
102    // keep these values in sync with android_media_AudioTrack.cpp
103    /**
104     * Creation mode where audio data is transferred from Java to the native layer
105     * only once before the audio starts playing.
106     */
107    public static final int MODE_STATIC = 0;
108    /**
109     * Creation mode where audio data is streamed from Java to the native layer
110     * as the audio is playing.
111     */
112    public static final int MODE_STREAM = 1;
113
114    /**
115     * State of an AudioTrack that was not successfully initialized upon creation.
116     */
117    public static final int STATE_UNINITIALIZED = 0;
118    /**
119     * State of an AudioTrack that is ready to be used.
120     */
121    public static final int STATE_INITIALIZED   = 1;
122    /**
123     * State of a successfully initialized AudioTrack that uses static data,
124     * but that hasn't received that data yet.
125     */
126    public static final int STATE_NO_STATIC_DATA = 2;
127
128    // Error codes:
129    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
130    /**
131     * Denotes a successful operation.
132     */
133    public  static final int SUCCESS                               = 0;
134    /**
135     * Denotes a generic operation failure.
136     */
137    public  static final int ERROR                                 = -1;
138    /**
139     * Denotes a failure due to the use of an invalid value.
140     */
141    public  static final int ERROR_BAD_VALUE                       = -2;
142    /**
143     * Denotes a failure due to the improper use of a method.
144     */
145    public  static final int ERROR_INVALID_OPERATION               = -3;
146
147    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
148    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
149    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
150    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
151    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;
152
153    // Events:
154    // to keep in sync with frameworks/av/include/media/AudioTrack.h
155    /**
156     * Event id denotes when playback head has reached a previously set marker.
157     */
158    private static final int NATIVE_EVENT_MARKER  = 3;
159    /**
160     * Event id denotes when previously set update period has elapsed during playback.
161     */
162    private static final int NATIVE_EVENT_NEW_POS = 4;
163
164    private final static String TAG = "android.media.AudioTrack";
165
166
167    /** @hide */
168    @IntDef({
169        WRITE_BLOCKING,
170        WRITE_NON_BLOCKING
171    })
172    @Retention(RetentionPolicy.SOURCE)
173    public @interface WriteMode {}
174
175    /**
176     * The write mode indicating the write operation will block until all data has been written,
177     * to be used in {@link #write(ByteBuffer, int, int)}
178     */
179    public final static int WRITE_BLOCKING = 0;
180    /**
181     * The write mode indicating the write operation will return immediately after
182     * queuing as much audio data for playback as possible without blocking, to be used in
183     * {@link #write(ByteBuffer, int, int)}.
184     */
185    public final static int WRITE_NON_BLOCKING = 1;
186
187    //--------------------------------------------------------------------------
188    // Member variables
189    //--------------------
190    /**
191     * Indicates the state of the AudioTrack instance.
192     */
193    private int mState = STATE_UNINITIALIZED;
194    /**
195     * Indicates the play state of the AudioTrack instance.
196     */
197    private int mPlayState = PLAYSTATE_STOPPED;
198    /**
199     * Lock to make sure mPlayState updates are reflecting the actual state of the object.
200     */
201    private final Object mPlayStateLock = new Object();
202    /**
203     * Sizes of the native audio buffer.
204     */
205    private int mNativeBufferSizeInBytes = 0;
206    private int mNativeBufferSizeInFrames = 0;
207    /**
208     * Handler for events coming from the native code.
209     */
210    private NativeEventHandlerDelegate mEventHandlerDelegate;
211    /**
212     * Looper associated with the thread that creates the AudioTrack instance.
213     */
214    private final Looper mInitializationLooper;
215    /**
216     * The audio data source sampling rate in Hz.
217     */
218    private int mSampleRate; // initialized by all constructors
219    /**
220     * The number of audio output channels (1 is mono, 2 is stereo).
221     */
222    private int mChannelCount = 1;
223    /**
224     * The audio channel mask.
225     */
226    private int mChannels = AudioFormat.CHANNEL_OUT_MONO;
227
228    /**
229     * The type of the audio stream to play. See
230     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
231     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
232     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
233     *   {@link AudioManager#STREAM_DTMF}.
234     */
235    private int mStreamType = AudioManager.STREAM_MUSIC;
236    /**
237     * The way audio is consumed by the audio sink, streaming or static.
238     */
239    private int mDataLoadMode = MODE_STREAM;
240    /**
241     * The current audio channel configuration.
242     */
243    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
244    /**
245     * The encoding of the audio samples.
246     * @see AudioFormat#ENCODING_PCM_8BIT
247     * @see AudioFormat#ENCODING_PCM_16BIT
248     * @see AudioFormat#ENCODING_PCM_FLOAT
249     */
250    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
251    /**
252     * Audio session ID
253     */
254    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
255    /**
256     * Reference to the app-ops service.
257     */
258    private final IAppOpsService mAppOps;
259
260    //--------------------------------
261    // Used exclusively by native code
262    //--------------------
263    /**
264     * Accessed by native methods: provides access to C++ AudioTrack object.
265     */
266    @SuppressWarnings("unused")
267    private long mNativeTrackInJavaObj;
268    /**
269     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
270     * the native AudioTrack object, but not stored in it).
271     */
272    @SuppressWarnings("unused")
273    private long mJniData;
274
275
276    //--------------------------------------------------------------------------
277    // Constructor, Finalize
278    //--------------------
279    /**
280     * Class constructor.
281     * @param streamType the type of the audio stream. See
282     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
283     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
284     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
285     * @param sampleRateInHz the initial source sample rate expressed in Hz.
286     * @param channelConfig describes the configuration of the audio channels.
287     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
288     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
289     * @param audioFormat the format in which the audio data is represented.
290     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
291     *   {@link AudioFormat#ENCODING_PCM_8BIT},
292     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
293     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
294     *   read from for playback.
295     *   If track's creation mode is {@link #MODE_STREAM}, you can write data into
296     *   this buffer in chunks less than or equal to this size, and it is typical to use
297     *   chunks of 1/2 of the total size to permit double-buffering.
298     *   If the track's creation mode is {@link #MODE_STATIC},
299     *   this is the maximum length sample, or audio clip, that can be played by this instance.
300     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
301     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
302     *   smaller than getMinBufferSize() will result in an initialization failure.
303     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
304     * @throws java.lang.IllegalArgumentException
305     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegate to the session-aware constructor and let the native layer
        // allocate a fresh audio session ID (AUDIO_SESSION_ALLOCATE).
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
    }
312
313    /**
314     * Class constructor with audio session. Use this constructor when the AudioTrack must be
315     * attached to a particular audio session. The primary use of the audio session ID is to
316     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
317     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
318     * and media players in the same session and not to the output mix.
319     * When an AudioTrack is created without specifying a session, it will create its own session
320     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
321     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
322     * session
323     * with all other media players or audio tracks in the same session, otherwise a new session
324     * will be created for this track if none is supplied.
325     * @param streamType the type of the audio stream. See
326     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
327     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
328     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
329     * @param sampleRateInHz the initial source sample rate expressed in Hz.
330     * @param channelConfig describes the configuration of the audio channels.
331     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
332     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
333     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
337     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
338     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
339     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
340     *   this is the maximum size of the sound that will be played for this instance.
341     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
342     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
343     *   smaller than getMinBufferSize() will result in an initialization failure.
344     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
345     * @param sessionId Id of audio session the AudioTrack must be attached to
346     * @throws java.lang.IllegalArgumentException
347     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        // Remember which looper is associated with the AudioTrack instantiation;
        // fall back to the main looper when the creating thread has none of its own.
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }
        mInitializationLooper = looper;

        // Validates the parameters and initializes mStreamType, mSampleRate,
        // mChannels/mChannelCount, mAudioFormat and mDataLoadMode;
        // throws IllegalArgumentException on any invalid value.
        audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);

        // Validates bufferSizeInBytes against the frame size and initializes
        // mNativeBufferSizeInBytes / mNativeBufferSizeInFrames.
        audioBuffSizeCheck(bufferSizeInBytes);

        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // Single-element in/out array: carries the requested session ID down to
        // the native layer, which writes back the session actually used.
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this),
                mStreamType, mSampleRate, mChannels, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSessionId = session[0];

        // A static-mode track is not usable until its audio data has been
        // written; a streaming track is ready immediately.
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }
390
391    // mask of all the channels supported by this implementation
392    private static final int SUPPORTED_OUT_CHANNELS =
393            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
394            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
395            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
396            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
397            AudioFormat.CHANNEL_OUT_BACK_LEFT |
398            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
399            AudioFormat.CHANNEL_OUT_BACK_CENTER;
400
401    // Convenience method for the constructor's parameter checks.
402    // This is where constructor IllegalArgumentException-s are thrown
403    // postconditions:
404    //    mStreamType is valid
405    //    mChannelCount is valid
406    //    mChannels is valid
407    //    mAudioFormat is valid
408    //    mSampleRate is valid
409    //    mDataLoadMode is valid
410    private void audioParamCheck(int streamType, int sampleRateInHz,
411                                 int channelConfig, int audioFormat, int mode) {
412
413        //--------------
414        // stream type
415        if( (streamType != AudioManager.STREAM_ALARM) && (streamType != AudioManager.STREAM_MUSIC)
416           && (streamType != AudioManager.STREAM_RING) && (streamType != AudioManager.STREAM_SYSTEM)
417           && (streamType != AudioManager.STREAM_VOICE_CALL)
418           && (streamType != AudioManager.STREAM_NOTIFICATION)
419           && (streamType != AudioManager.STREAM_BLUETOOTH_SCO)
420           && (streamType != AudioManager.STREAM_DTMF)) {
421            throw new IllegalArgumentException("Invalid stream type.");
422        }
423        mStreamType = streamType;
424
425        //--------------
426        // sample rate, note these values are subject to change
427        if ( (sampleRateInHz < 4000) || (sampleRateInHz > 48000) ) {
428            throw new IllegalArgumentException(sampleRateInHz
429                    + "Hz is not a supported sample rate.");
430        }
431        mSampleRate = sampleRateInHz;
432
433        //--------------
434        // channel config
435        mChannelConfiguration = channelConfig;
436
437        switch (channelConfig) {
438        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
439        case AudioFormat.CHANNEL_OUT_MONO:
440        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
441            mChannelCount = 1;
442            mChannels = AudioFormat.CHANNEL_OUT_MONO;
443            break;
444        case AudioFormat.CHANNEL_OUT_STEREO:
445        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
446            mChannelCount = 2;
447            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
448            break;
449        default:
450            if (!isMultichannelConfigSupported(channelConfig)) {
451                // input channel configuration features unsupported channels
452                throw new IllegalArgumentException("Unsupported channel configuration.");
453            }
454            mChannels = channelConfig;
455            mChannelCount = Integer.bitCount(channelConfig);
456        }
457
458        //--------------
459        // audio format
460        switch (audioFormat) {
461        case AudioFormat.ENCODING_DEFAULT:
462            mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
463            break;
464        case AudioFormat.ENCODING_PCM_16BIT:
465        case AudioFormat.ENCODING_PCM_8BIT:
466        case AudioFormat.ENCODING_PCM_FLOAT:
467            mAudioFormat = audioFormat;
468            break;
469        default:
470            throw new IllegalArgumentException("Unsupported sample encoding."
471                + " Should be ENCODING_PCM_8BIT or ENCODING_PCM_16BIT"
472                + " or ENCODING_PCM_FLOAT"
473                + ".");
474        }
475
476        //--------------
477        // audio load mode
478        if ( (mode != MODE_STREAM) && (mode != MODE_STATIC) ) {
479            throw new IllegalArgumentException("Invalid mode.");
480        }
481        mDataLoadMode = mode;
482    }
483
484    /**
485     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
486     * @param channelConfig the mask to validate
487     * @return false if the AudioTrack can't be used with such a mask
488     */
489    private static boolean isMultichannelConfigSupported(int channelConfig) {
490        // check for unsupported channels
491        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
492            loge("Channel configuration features unsupported channels");
493            return false;
494        }
495        // check for unsupported multichannel combinations:
496        // - FL/FR must be present
497        // - L/R channels must be paired (e.g. no single L channel)
498        final int frontPair =
499                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
500        if ((channelConfig & frontPair) != frontPair) {
501                loge("Front channels must be present in multichannel configurations");
502                return false;
503        }
504        final int backPair =
505                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
506        if ((channelConfig & backPair) != 0) {
507            if ((channelConfig & backPair) != backPair) {
508                loge("Rear channels can't be used independently");
509                return false;
510            }
511        }
512        return true;
513    }
514
515
516    // Convenience method for the constructor's audio buffer size check.
517    // preconditions:
518    //    mChannelCount is valid
519    //    mAudioFormat is valid
520    // postcondition:
521    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
522    private void audioBuffSizeCheck(int audioBufferSize) {
523        // NB: this section is only valid with PCM data.
524        //     To update when supporting compressed formats
525        int frameSizeInBytes = mChannelCount
526                * (AudioFormat.getBytesPerSample(mAudioFormat));
527        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
528            throw new IllegalArgumentException("Invalid audio buffer size.");
529        }
530
531        mNativeBufferSizeInBytes = audioBufferSize;
532        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
533    }
534
535
536    /**
537     * Releases the native AudioTrack resources.
538     */
539    public void release() {
540        // even though native_release() stops the native AudioTrack, we need to stop
541        // AudioTrack subclasses too.
542        try {
543            stop();
544        } catch(IllegalStateException ise) {
545            // don't raise an exception, we're releasing the resources.
546        }
547        native_release();
548        mState = STATE_UNINITIALIZED;
549    }
550
551    @Override
552    protected void finalize() {
553        native_finalize();
554    }
555
556    //--------------------------------------------------------------------------
557    // Getters
558    //--------------------
559    /**
560     * Returns the minimum gain value, which is the constant 0.0.
561     * Gain values less than 0.0 will be clamped to 0.0.
562     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
563     * @return the minimum value, which is the constant 0.0.
564     */
565    static public float getMinVolume() {
566        return GAIN_MIN;
567    }
568
569    /**
570     * Returns the maximum gain value, which is greater than or equal to 1.0.
571     * Gain values greater than the maximum will be clamped to the maximum.
572     * <p>The word "volume" in the API name is historical; this is actually a gain.
573     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
574     * corresponds to a gain of 0 dB (sample values left unmodified).
575     * @return the maximum value, which is greater than or equal to 1.0.
576     */
577    static public float getMaxVolume() {
578        return GAIN_MAX;
579    }
580
581    /**
582     * Returns the configured audio data sample rate in Hz
583     */
    public int getSampleRate() {
        // mSampleRate is initialized by all constructors (via audioParamCheck).
        return mSampleRate;
    }
587
588    /**
589     * Returns the current playback rate in Hz.
590     */
    public int getPlaybackRate() {
        // Queried from the native layer on every call, not cached in Java;
        // may differ from getSampleRate() if the playback rate was changed.
        return native_get_playback_rate();
    }
594
595    /**
596     * Returns the configured audio data format. See {@link AudioFormat#ENCODING_PCM_16BIT}
597     * and {@link AudioFormat#ENCODING_PCM_8BIT}.
598     */
    public int getAudioFormat() {
        // One of the AudioFormat.ENCODING_PCM_* values; ENCODING_DEFAULT is
        // resolved to ENCODING_PCM_16BIT during construction.
        return mAudioFormat;
    }
602
603    /**
604     * Returns the type of audio stream this AudioTrack is configured for.
605     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
606     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
607     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
608     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
609     */
    public int getStreamType() {
        // One of the AudioManager.STREAM_* constants validated at construction.
        return mStreamType;
    }
613
614    /**
615     * Returns the configured channel configuration.
616     * See {@link AudioFormat#CHANNEL_OUT_MONO}
617     * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
618     */
    public int getChannelConfiguration() {
        // The channel mask exactly as passed by the caller (before any
        // normalization into mChannels).
        return mChannelConfiguration;
    }
622
623    /**
624     * Returns the configured number of channels.
625     */
    public int getChannelCount() {
        // Derived from the channel mask at construction (bit count for
        // multichannel masks).
        return mChannelCount;
    }
629
630    /**
631     * Returns the state of the AudioTrack instance. This is useful after the
632     * AudioTrack instance has been created to check if it was initialized
633     * properly. This ensures that the appropriate resources have been acquired.
634     * @see #STATE_INITIALIZED
635     * @see #STATE_NO_STATIC_DATA
636     * @see #STATE_UNINITIALIZED
637     */
    public int getState() {
        // Remains STATE_UNINITIALIZED if native_setup failed during
        // construction; STATE_NO_STATIC_DATA until a static track receives data.
        return mState;
    }
641
642    /**
643     * Returns the playback state of the AudioTrack instance.
644     * @see #PLAYSTATE_STOPPED
645     * @see #PLAYSTATE_PAUSED
646     * @see #PLAYSTATE_PLAYING
647     */
    public int getPlayState() {
        // mPlayStateLock guards mPlayState updates (see field doc); read under
        // the same lock so the returned value reflects the actual state.
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }
653
654    /**
655     *  Returns the "native frame count", derived from the bufferSizeInBytes specified at
656     *  creation time and converted to frame units.
657     *  If track's creation mode is {@link #MODE_STATIC},
658     *  it is equal to the specified bufferSizeInBytes converted to frame units.
659     *  If track's creation mode is {@link #MODE_STREAM},
660     *  it is typically greater than or equal to the specified bufferSizeInBytes converted to frame
661     *  units; it may be rounded up to a larger value if needed by the target device implementation.
662     *  @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
663     *  See {@link AudioManager#getProperty(String)} for key
664     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
665     */
    @Deprecated
    protected int getNativeFrameCount() {
        // Deprecated: exposed only to subclasses; queried from the native
        // layer, which may have rounded the buffer up from the requested size.
        return native_get_native_frame_count();
    }
670
671    /**
672     * Returns marker position expressed in frames.
673     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
674     * or zero if marker is disabled.
675     */
    public int getNotificationMarkerPosition() {
        // Marker position lives native-side; zero means the marker is disabled.
        return native_get_marker_pos();
    }
679
680    /**
681     * Returns the notification update period expressed in frames.
682     * Zero means that no position update notifications are being delivered.
683     */
    public int getPositionNotificationPeriod() {
        // Period lives native-side; zero means no periodic notifications.
        return native_get_pos_update_period();
    }
687
688    /**
689     * Returns the playback head position expressed in frames.
690     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
691     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
692     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
693     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
694     * It is reset to zero by flush(), reload(), and stop().
695     */
    public int getPlaybackHeadPosition() {
        // Continuously advancing frame counter; interpret the int as unsigned
        // 32 bits since it wraps past 0x7FFFFFFF (see method javadoc).
        return native_get_position();
    }
699
700    /**
701     * Returns this track's estimated latency in milliseconds. This includes the latency due
702     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
703     *
704     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
705     * a better solution.
706     * @hide
707     */
    public int getLatency() {
        // Hidden API (see javadoc): estimate computed by the native layer,
        // covering track buffer, mixer and driver latency.
        return native_get_latency();
    }
711
712    /**
713     *  Returns the output sample rate in Hz for the specified stream type.
714     */
    static public int getNativeOutputSampleRate(int streamType) {
        // Hardware output rate for the given AudioManager.STREAM_* type,
        // queried from the native layer.
        return native_get_output_sample_rate(streamType);
    }
718
719    /**
720     * Returns the minimum buffer size required for the successful creation of an AudioTrack
721     * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
722     * guarantee a smooth playback under load, and higher values should be chosen according to
723     * the expected frequency at which the buffer will be refilled with additional data to play.
724     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
725     * to a higher value than the initial source sample rate, be sure to configure the buffer size
726     * based on the highest planned sample rate.
727     * @param sampleRateInHz the source sample rate expressed in Hz.
728     * @param channelConfig describes the configuration of the audio channels.
729     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
730     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
731     * @param audioFormat the format in which the audio data is represented.
732     *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
733     *   {@link AudioFormat#ENCODING_PCM_8BIT},
734     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
735     * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
736     *   or {@link #ERROR} if unable to query for output properties,
737     *   or the minimum buffer size expressed in bytes.
738     */
739    static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
740        int channelCount = 0;
741        switch(channelConfig) {
742        case AudioFormat.CHANNEL_OUT_MONO:
743        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
744            channelCount = 1;
745            break;
746        case AudioFormat.CHANNEL_OUT_STEREO:
747        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
748            channelCount = 2;
749            break;
750        default:
751            if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
752                // input channel configuration features unsupported channels
753                loge("getMinBufferSize(): Invalid channel configuration.");
754                return ERROR_BAD_VALUE;
755            } else {
756                channelCount = Integer.bitCount(channelConfig);
757            }
758        }
759
760        if ((audioFormat != AudioFormat.ENCODING_PCM_16BIT)
761            && (audioFormat != AudioFormat.ENCODING_PCM_8BIT)
762            && (audioFormat != AudioFormat.ENCODING_PCM_FLOAT)) {
763            loge("getMinBufferSize(): Invalid audio format.");
764            return ERROR_BAD_VALUE;
765        }
766
767        // sample rate, note these values are subject to change
768        if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
769            loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
770            return ERROR_BAD_VALUE;
771        }
772
773        int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
774        if (size <= 0) {
775            loge("getMinBufferSize(): error querying hardware");
776            return ERROR;
777        }
778        else {
779            return size;
780        }
781    }
782
    /**
     * Returns the audio session ID.
     *
     * @return the ID of the audio session this AudioTrack belongs to.
     */
    public int getAudioSessionId() {
        // Cached at construction; no native round-trip needed.
        return mSessionId;
    }
791
792   /**
793    * Poll for a timestamp on demand.
794    *
795    * Use if you need to get the most recent timestamp outside of the event callback handler.
796    * Calling this method too often may be inefficient;
797    * if you need a high-resolution mapping between frame position and presentation time,
798    * consider implementing that at application level, based on low-resolution timestamps.
799    * The audio data at the returned position may either already have been
800    * presented, or may have not yet been presented but is committed to be presented.
801    * It is not possible to request the time corresponding to a particular position,
802    * or to request the (fractional) position corresponding to a particular time.
803    * If you need such features, consider implementing them at application level.
804    *
805    * @param timestamp a reference to a non-null AudioTimestamp instance allocated
806    *        and owned by caller.
807    * @return true if a timestamp is available, or false if no timestamp is available.
808    *         If a timestamp if available,
809    *         the AudioTimestamp instance is filled in with a position in frame units, together
810    *         with the estimated time when that frame was presented or is committed to
811    *         be presented.
812    *         In the case that no timestamp is available, any supplied instance is left unaltered.
813    */
814    public boolean getTimestamp(AudioTimestamp timestamp)
815    {
816        if (timestamp == null) {
817            throw new IllegalArgumentException();
818        }
819        // It's unfortunate, but we have to either create garbage every time or use synchronized
820        long[] longArray = new long[2];
821        int ret = native_get_timestamp(longArray);
822        if (ret != SUCCESS) {
823            return false;
824        }
825        timestamp.framePosition = longArray[0];
826        timestamp.nanoTime = longArray[1];
827        return true;
828    }
829
830
831    //--------------------------------------------------------------------------
832    // Initialization / configuration
833    //--------------------
    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
     * Notifications will be received in the same thread as the one in which the AudioTrack
     * instance was created.
     * @param listener the listener to notify, or null to clear a previously set listener
     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Delegates with a null Handler, which means "use the creating thread's looper".
        setPlaybackPositionUpdateListener(listener, null);
    }
844
845    /**
846     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
847     * for each periodic playback head position update.
848     * Use this method to receive AudioTrack events in the Handler associated with another
849     * thread than the one in which you created the AudioTrack instance.
850     * @param listener
851     * @param handler the Handler that will receive the event notification messages.
852     */
853    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
854                                                    Handler handler) {
855        if (listener != null) {
856            mEventHandlerDelegate = new NativeEventHandlerDelegate(this, listener, handler);
857        } else {
858            mEventHandlerDelegate = null;
859        }
860    }
861
862
863    private static float clampGainOrLevel(float gainOrLevel) {
864        if (Float.isNaN(gainOrLevel)) {
865            throw new IllegalArgumentException();
866        }
867        if (gainOrLevel < GAIN_MIN) {
868            gainOrLevel = GAIN_MIN;
869        } else if (gainOrLevel > GAIN_MAX) {
870            gainOrLevel = GAIN_MAX;
871        }
872        return gainOrLevel;
873    }
874
875
876     /**
877     * Sets the specified left and right output gain values on the AudioTrack.
878     * <p>Gain values are clamped to the closed interval [0.0, max] where
879     * max is the value of {@link #getMaxVolume}.
880     * A value of 0.0 results in zero gain (silence), and
881     * a value of 1.0 means unity gain (signal unchanged).
882     * The default value is 1.0 meaning unity gain.
883     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
884     * @param leftGain output gain for the left channel.
885     * @param rightGain output gain for the right channel
886     * @return error code or success, see {@link #SUCCESS},
887     *    {@link #ERROR_INVALID_OPERATION}
888     * @deprecated Applications should use {@link #setVolume} instead, as it
889     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
890     */
891    public int setStereoVolume(float leftGain, float rightGain) {
892        if (isRestricted()) {
893            return SUCCESS;
894        }
895        if (mState == STATE_UNINITIALIZED) {
896            return ERROR_INVALID_OPERATION;
897        }
898
899        leftGain = clampGainOrLevel(leftGain);
900        rightGain = clampGainOrLevel(rightGain);
901
902        native_setVolume(leftGain, rightGain);
903
904        return SUCCESS;
905    }
906
907
    /**
     * Sets the specified output gain value on all channels of this track.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>This API is preferred over {@link #setStereoVolume}, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param gain output gain for all channels.
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setVolume(float gain) {
        // Same gain on both channels; clamping and state checks happen in the delegate.
        return setStereoVolume(gain, gain);
    }
925
926
927    /**
928     * Sets the playback sample rate for this track. This sets the sampling rate at which
929     * the audio data will be consumed and played back
930     * (as set by the sampleRateInHz parameter in the
931     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
932     * not the original sampling rate of the
933     * content. For example, setting it to half the sample rate of the content will cause the
934     * playback to last twice as long, but will also result in a pitch shift down by one octave.
935     * The valid sample rate range is from 1 Hz to twice the value returned by
936     * {@link #getNativeOutputSampleRate(int)}.
937     * @param sampleRateInHz the sample rate expressed in Hz
938     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
939     *    {@link #ERROR_INVALID_OPERATION}
940     */
941    public int setPlaybackRate(int sampleRateInHz) {
942        if (mState != STATE_INITIALIZED) {
943            return ERROR_INVALID_OPERATION;
944        }
945        if (sampleRateInHz <= 0) {
946            return ERROR_BAD_VALUE;
947        }
948        return native_set_playback_rate(sampleRateInHz);
949    }
950
951
952    /**
953     * Sets the position of the notification marker.  At most one marker can be active.
954     * @param markerInFrames marker position in wrapping frame units similar to
955     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
956     * To set a marker at a position which would appear as zero due to wraparound,
957     * a workaround is to use a non-zero position near zero, such as -1 or 1.
958     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
959     *  {@link #ERROR_INVALID_OPERATION}
960     */
961    public int setNotificationMarkerPosition(int markerInFrames) {
962        if (mState == STATE_UNINITIALIZED) {
963            return ERROR_INVALID_OPERATION;
964        }
965        return native_set_marker_pos(markerInFrames);
966    }
967
968
969    /**
970     * Sets the period for the periodic notification event.
971     * @param periodInFrames update period expressed in frames
972     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
973     */
974    public int setPositionNotificationPeriod(int periodInFrames) {
975        if (mState == STATE_UNINITIALIZED) {
976            return ERROR_INVALID_OPERATION;
977        }
978        return native_set_pos_update_period(periodInFrames);
979    }
980
981
982    /**
983     * Sets the playback head position.
984     * The track must be stopped or paused for the position to be changed,
985     * and must use the {@link #MODE_STATIC} mode.
986     * @param positionInFrames playback head position expressed in frames
987     * Zero corresponds to start of buffer.
988     * The position must not be greater than the buffer size in frames, or negative.
989     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
990     *    {@link #ERROR_INVALID_OPERATION}
991     */
992    public int setPlaybackHeadPosition(int positionInFrames) {
993        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
994                getPlayState() == PLAYSTATE_PLAYING) {
995            return ERROR_INVALID_OPERATION;
996        }
997        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
998            return ERROR_BAD_VALUE;
999        }
1000        return native_set_position(positionInFrames);
1001    }
1002
1003    /**
1004     * Sets the loop points and the loop count. The loop can be infinite.
1005     * Similarly to setPlaybackHeadPosition,
1006     * the track must be stopped or paused for the loop points to be changed,
1007     * and must use the {@link #MODE_STATIC} mode.
1008     * @param startInFrames loop start marker expressed in frames
1009     * Zero corresponds to start of buffer.
1010     * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1011     * @param endInFrames loop end marker expressed in frames
1012     * The total buffer size in frames corresponds to end of buffer.
1013     * The end marker must not be greater than the buffer size in frames.
1014     * For looping, the end marker must not be less than or equal to the start marker,
1015     * but to disable looping
1016     * it is permitted for start marker, end marker, and loop count to all be 0.
1017     * @param loopCount the number of times the loop is looped.
1018     *    A value of -1 means infinite looping, and 0 disables looping.
1019     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1020     *    {@link #ERROR_INVALID_OPERATION}
1021     */
1022    public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1023        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED ||
1024                getPlayState() == PLAYSTATE_PLAYING) {
1025            return ERROR_INVALID_OPERATION;
1026        }
1027        if (loopCount == 0) {
1028            ;   // explicitly allowed as an exception to the loop region range check
1029        } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1030                startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1031            return ERROR_BAD_VALUE;
1032        }
1033        return native_set_loop(startInFrames, endInFrames, loopCount);
1034    }
1035
    /**
     * Sets the initialization state of the instance. This method was originally intended to be used
     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
     * @param state the state of the AudioTrack instance
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     */
    @Deprecated
    protected void setState(int state) {
        // Direct overwrite, no validation; kept only for legacy subclasses.
        mState = state;
    }
1047
1048
1049    //---------------------------------------------------------
1050    // Transport control methods
1051    //--------------------
    /**
     * Starts playing an AudioTrack.
     * If track's creation mode is {@link #MODE_STATIC}, you must have called write() prior.
     *
     * @throws IllegalStateException if the track has not been successfully initialized
     */
    public void play()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
        }
        // App-ops restriction (e.g. background play disallowed): start silently
        // instead of failing.
        if (isRestricted()) {
            setVolume(0);
        }
        // Guard the state transition so readers of mPlayState see a consistent value.
        synchronized(mPlayStateLock) {
            native_start();
            mPlayState = PLAYSTATE_PLAYING;
        }
    }
1071
1072    private boolean isRestricted() {
1073        try {
1074            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, mStreamType,
1075                    Process.myUid(), ActivityThread.currentPackageName());
1076            return mode != AppOpsManager.MODE_ALLOWED;
1077        } catch (RemoteException e) {
1078            return false;
1079        }
1080    }
1081
    /**
     * Stops playing the audio data.
     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
     * after the last buffer that was written has been played. For an immediate stop, use
     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
     * back yet.
     * @throws IllegalStateException if the track has not been successfully initialized
     */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing; the lock keeps mPlayState consistent with the native state.
        synchronized(mPlayStateLock) {
            native_stop();
            mPlayState = PLAYSTATE_STOPPED;
        }
    }
1102
1103    /**
1104     * Pauses the playback of the audio data. Data that has not been played
1105     * back will not be discarded. Subsequent calls to {@link #play} will play
1106     * this data back. See {@link #flush()} to discard this data.
1107     *
1108     * @throws IllegalStateException
1109     */
1110    public void pause()
1111    throws IllegalStateException {
1112        if (mState != STATE_INITIALIZED) {
1113            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
1114        }
1115        //logd("pause()");
1116
1117        // pause playback
1118        synchronized(mPlayStateLock) {
1119            native_pause();
1120            mPlayState = PLAYSTATE_PAUSED;
1121        }
1122    }
1123
1124
1125    //---------------------------------------------------------
1126    // Audio data supply
1127    //--------------------
1128
1129    /**
1130     * Flushes the audio data currently queued for playback. Any data that has
1131     * not been played back will be discarded.  No-op if not stopped or paused,
1132     * or if the track's creation mode is not {@link #MODE_STREAM}.
1133     */
1134    public void flush() {
1135        if (mState == STATE_INITIALIZED) {
1136            // flush the data in native layer
1137            native_flush();
1138        }
1139
1140    }
1141
1142    /**
1143     * Writes the audio data to the audio sink for playback (streaming mode),
1144     * or copies audio data for later playback (static buffer mode).
1145     * In streaming mode, will block until all data has been written to the audio sink.
1146     * In static buffer mode, copies the data to the buffer starting at offset 0.
1147     * Note that the actual playback of this data might occur after this function
1148     * returns. This function is thread safe with respect to {@link #stop} calls,
1149     * in which case all of the specified data might not be written to the audio sink.
1150     *
1151     * @param audioData the array that holds the data to play.
1152     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
1153     *    starts.
1154     * @param sizeInBytes the number of bytes to read in audioData after the offset.
1155     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
1156     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1157     *    the parameters don't resolve to valid data and indexes.
1158     */
1159
1160    public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
1161
1162        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1163            return ERROR_INVALID_OPERATION;
1164        }
1165
1166        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
1167                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
1168                || (offsetInBytes + sizeInBytes > audioData.length)) {
1169            return ERROR_BAD_VALUE;
1170        }
1171
1172        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
1173                true /*isBlocking*/);
1174
1175        if ((mDataLoadMode == MODE_STATIC)
1176                && (mState == STATE_NO_STATIC_DATA)
1177                && (ret > 0)) {
1178            // benign race with respect to other APIs that read mState
1179            mState = STATE_INITIALIZED;
1180        }
1181
1182        return ret;
1183    }
1184
1185
1186    /**
1187     * Writes the audio data to the audio sink for playback (streaming mode),
1188     * or copies audio data for later playback (static buffer mode).
1189     * In streaming mode, will block until all data has been written to the audio sink.
1190     * In static buffer mode, copies the data to the buffer starting at offset 0.
1191     * Note that the actual playback of this data might occur after this function
1192     * returns. This function is thread safe with respect to {@link #stop} calls,
1193     * in which case all of the specified data might not be written to the audio sink.
1194     *
1195     * @param audioData the array that holds the data to play.
1196     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1197     *     starts.
1198     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1199     * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
1200     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1201     *    the parameters don't resolve to valid data and indexes.
1202     */
1203
1204    public int write(short[] audioData, int offsetInShorts, int sizeInShorts) {
1205
1206        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1207            return ERROR_INVALID_OPERATION;
1208        }
1209
1210        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
1211                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
1212                || (offsetInShorts + sizeInShorts > audioData.length)) {
1213            return ERROR_BAD_VALUE;
1214        }
1215
1216        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat);
1217
1218        if ((mDataLoadMode == MODE_STATIC)
1219                && (mState == STATE_NO_STATIC_DATA)
1220                && (ret > 0)) {
1221            // benign race with respect to other APIs that read mState
1222            mState = STATE_INITIALIZED;
1223        }
1224
1225        return ret;
1226    }
1227
1228
1229    /**
1230     * Writes the audio data to the audio sink for playback (streaming mode),
1231     * or copies audio data for later playback (static buffer mode).
1232     * In static buffer mode, copies the data to the buffer starting at offset 0,
1233     * and the write mode is ignored.
1234     * In streaming mode, the blocking behavior will depend on the write mode.
1235     * <p>
1236     * Note that the actual playback of this data might occur after this function
1237     * returns. This function is thread safe with respect to {@link #stop} calls,
1238     * in which case all of the specified data might not be written to the audio sink.
1239     * <p>
1240     * @param audioData the array that holds the data to play.
1241     *     The implementation does not clip for sample values within the nominal range
1242     *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
1243     *     less than or equal to unity (1.0f), and in the absence of post-processing effects
1244     *     that could add energy, such as reverb.  For the convenience of applications
1245     *     that compute samples using filters with non-unity gain,
1246     *     sample values +3 dB beyond the nominal range are permitted.
1247     *     However such values may eventually be limited or clipped, depending on various gains
1248     *     and later processing in the audio path.  Therefore applications are encouraged
1249     *     to provide samples values within the nominal range.
1250     * @param offsetInFloats the offset, expressed as a number of floats,
1251     *     in audioData where the data to play starts.
1252     * @param sizeInFloats the number of floats to read in audioData after the offset.
1253     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1254     *     effect in static mode.
1255     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1256     *         to the audio sink.
1257     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1258     *     queuing as much audio data for playback as possible without blocking.
1259     * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION}
1260     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1261     *    the parameters don't resolve to valid data and indexes.
1262     */
1263    public int write(float[] audioData, int offsetInFloats, int sizeInFloats,
1264            @WriteMode int writeMode) {
1265
1266        if (mState == STATE_UNINITIALIZED) {
1267            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1268            return ERROR_INVALID_OPERATION;
1269        }
1270
1271        if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
1272            Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
1273            return ERROR_INVALID_OPERATION;
1274        }
1275
1276        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1277            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1278            return ERROR_BAD_VALUE;
1279        }
1280
1281        if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
1282                || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
1283                || (offsetInFloats + sizeInFloats > audioData.length)) {
1284            Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
1285            return ERROR_BAD_VALUE;
1286        }
1287
1288        int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
1289                writeMode == WRITE_BLOCKING);
1290
1291        if ((mDataLoadMode == MODE_STATIC)
1292                && (mState == STATE_NO_STATIC_DATA)
1293                && (ret > 0)) {
1294            // benign race with respect to other APIs that read mState
1295            mState = STATE_INITIALIZED;
1296        }
1297
1298        return ret;
1299    }
1300
1301
1302    /**
1303     * Writes the audio data to the audio sink for playback (streaming mode),
1304     * or copies audio data for later playback (static buffer mode).
1305     * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write
1306     * mode is ignored.
1307     * In streaming mode, the blocking behavior will depend on the write mode.
1308     * @param audioData the buffer that holds the data to play, starting at the position reported
1309     *     by <code>audioData.position()</code>.
1310     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
1311     *     have been advanced to reflect the amount of data that was successfully written to
1312     *     the AudioTrack.
1313     * @param sizeInBytes number of bytes to write.
1314     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
1315     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1316     *     effect in static mode.
1317     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1318     *         to the audio sink.
1319     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1320     *     queuing as much audio data for playback as possible without blocking.
1321     * @return 0 or a positive number of bytes that were written, or
1322     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1323     */
1324    public int write(ByteBuffer audioData, int sizeInBytes,
1325            @WriteMode int writeMode) {
1326
1327        if (mState == STATE_UNINITIALIZED) {
1328            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1329            return ERROR_INVALID_OPERATION;
1330        }
1331
1332        if (mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1333            Log.e(TAG, "AudioTrack.write(ByteBuffer ...) not yet supported for ENCODING_PCM_FLOAT");
1334            return ERROR_INVALID_OPERATION;
1335        }
1336
1337        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1338            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1339            return ERROR_BAD_VALUE;
1340        }
1341
1342        if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
1343            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
1344            return ERROR_BAD_VALUE;
1345        }
1346
1347        int ret = 0;
1348        if (audioData.isDirect()) {
1349            ret = native_write_native_bytes(audioData,
1350                    audioData.position(), sizeInBytes, mAudioFormat,
1351                    writeMode == WRITE_BLOCKING);
1352        } else {
1353            ret = native_write_byte(NioUtils.unsafeArray(audioData),
1354                    NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
1355                    sizeInBytes, mAudioFormat,
1356                    writeMode == WRITE_BLOCKING);
1357        }
1358
1359        if ((mDataLoadMode == MODE_STATIC)
1360                && (mState == STATE_NO_STATIC_DATA)
1361                && (ret > 0)) {
1362            // benign race with respect to other APIs that read mState
1363            mState = STATE_INITIALIZED;
1364        }
1365
1366        if (ret > 0) {
1367            audioData.position(audioData.position() + ret);
1368        }
1369
1370        return ret;
1371    }
1372
1373    /**
1374     * Notifies the native resource to reuse the audio data already loaded in the native
1375     * layer, that is to rewind to start of buffer.
1376     * The track's creation mode must be {@link #MODE_STATIC}.
1377     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1378     *  {@link #ERROR_INVALID_OPERATION}
1379     */
1380    public int reloadStaticData() {
1381        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
1382            return ERROR_INVALID_OPERATION;
1383        }
1384        return native_reload_static();
1385    }
1386
1387    //--------------------------------------------------------------------------
1388    // Audio effects management
1389    //--------------------
1390
1391    /**
1392     * Attaches an auxiliary effect to the audio track. A typical auxiliary
1393     * effect is a reverberation effect which can be applied on any sound source
1394     * that directs a certain amount of its energy to this effect. This amount
1395     * is defined by setAuxEffectSendLevel().
1396     * {@see #setAuxEffectSendLevel(float)}.
1397     * <p>After creating an auxiliary effect (e.g.
1398     * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
1399     * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
1400     * this method to attach the audio track to the effect.
1401     * <p>To detach the effect from the audio track, call this method with a
1402     * null effect id.
1403     *
1404     * @param effectId system wide unique id of the effect to attach
1405     * @return error code or success, see {@link #SUCCESS},
1406     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
1407     */
1408    public int attachAuxEffect(int effectId) {
1409        if (mState == STATE_UNINITIALIZED) {
1410            return ERROR_INVALID_OPERATION;
1411        }
1412        return native_attachAuxEffect(effectId);
1413    }
1414
1415    /**
1416     * Sets the send level of the audio track to the attached auxiliary effect
1417     * {@link #attachAuxEffect(int)}.  Effect levels
1418     * are clamped to the closed interval [0.0, max] where
1419     * max is the value of {@link #getMaxVolume}.
1420     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
1421     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
1422     * this method must be called for the effect to be applied.
1423     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
1424     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
1425     * so an appropriate conversion from linear UI input x to level is:
1426     * x == 0 -&gt; level = 0
1427     * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
1428     *
1429     * @param level linear send level
1430     * @return error code or success, see {@link #SUCCESS},
1431     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
1432     */
1433    public int setAuxEffectSendLevel(float level) {
1434        if (isRestricted()) {
1435            return SUCCESS;
1436        }
1437        if (mState == STATE_UNINITIALIZED) {
1438            return ERROR_INVALID_OPERATION;
1439        }
1440        level = clampGainOrLevel(level);
1441        int err = native_setAuxEffectSendLevel(level);
1442        return err == 0 ? SUCCESS : ERROR;
1443    }
1444
1445    //---------------------------------------------------------
1446    // Interface definitions
1447    //--------------------
1448    /**
1449     * Interface definition for a callback to be invoked when the playback head position of
1450     * an AudioTrack has reached a notification marker or has increased by a certain period.
1451     */
1452    public interface OnPlaybackPositionUpdateListener  {
1453        /**
1454         * Called on the listener to notify it that the previously set marker has been reached
1455         * by the playback head.
1456         */
1457        void onMarkerReached(AudioTrack track);
1458
1459        /**
1460         * Called on the listener to periodically notify it that the playback head has reached
1461         * a multiple of the notification period.
1462         */
1463        void onPeriodicNotification(AudioTrack track);
1464    }
1465
1466
1467    //---------------------------------------------------------
1468    // Inner classes
1469    //--------------------
1470    /**
1471     * Helper class to handle the forwarding of native events to the appropriate listener
1472     * (potentially) handled in a different thread
1473     */
1474    private class NativeEventHandlerDelegate {
1475        private final Handler mHandler;
1476
1477        NativeEventHandlerDelegate(final AudioTrack track,
1478                                   final OnPlaybackPositionUpdateListener listener,
1479                                   Handler handler) {
1480            // find the looper for our new event handler
1481            Looper looper;
1482            if (handler != null) {
1483                looper = handler.getLooper();
1484            } else {
1485                // no given handler, use the looper the AudioTrack was created in
1486                looper = mInitializationLooper;
1487            }
1488
1489            // construct the event handler with this looper
1490            if (looper != null) {
1491                // implement the event handler delegate
1492                mHandler = new Handler(looper) {
1493                    @Override
1494                    public void handleMessage(Message msg) {
1495                        if (track == null) {
1496                            return;
1497                        }
1498                        switch(msg.what) {
1499                        case NATIVE_EVENT_MARKER:
1500                            if (listener != null) {
1501                                listener.onMarkerReached(track);
1502                            }
1503                            break;
1504                        case NATIVE_EVENT_NEW_POS:
1505                            if (listener != null) {
1506                                listener.onPeriodicNotification(track);
1507                            }
1508                            break;
1509                        default:
1510                            loge("Unknown native event type: " + msg.what);
1511                            break;
1512                        }
1513                    }
1514                };
1515            } else {
1516                mHandler = null;
1517            }
1518        }
1519
1520        Handler getHandler() {
1521            return mHandler;
1522        }
1523    }
1524
1525
1526    //---------------------------------------------------------
1527    // Java methods called from the native side
1528    //--------------------
1529    @SuppressWarnings("unused")
1530    private static void postEventFromNative(Object audiotrack_ref,
1531            int what, int arg1, int arg2, Object obj) {
1532        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
1533        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
1534        if (track == null) {
1535            return;
1536        }
1537
1538        NativeEventHandlerDelegate delegate = track.mEventHandlerDelegate;
1539        if (delegate != null) {
1540            Handler handler = delegate.getHandler();
1541            if (handler != null) {
1542                Message m = handler.obtainMessage(what, arg1, arg2, obj);
1543                handler.sendMessage(m);
1544            }
1545        }
1546
1547    }
1548
1549
1550    //---------------------------------------------------------
1551    // Native methods called from the Java side
1552    //--------------------
1553
1554    private native final int native_setup(Object audiotrack_this,
1555            int streamType, int sampleRate, int channelMask, int audioFormat,
1556            int buffSizeInBytes, int mode, int[] sessionId);
1557
1558    private native final void native_finalize();
1559
1560    private native final void native_release();
1561
1562    private native final void native_start();
1563
1564    private native final void native_stop();
1565
1566    private native final void native_pause();
1567
1568    private native final void native_flush();
1569
1570    private native final int native_write_byte(byte[] audioData,
1571                                               int offsetInBytes, int sizeInBytes, int format,
1572                                               boolean isBlocking);
1573
1574    private native final int native_write_short(short[] audioData,
1575                                                int offsetInShorts, int sizeInShorts, int format);
1576
1577    private native final int native_write_float(float[] audioData,
1578                                                int offsetInFloats, int sizeInFloats, int format,
1579                                                boolean isBlocking);
1580
1581    private native final int native_write_native_bytes(Object audioData,
1582            int positionInBytes, int sizeInBytes, int format, boolean blocking);
1583
1584    private native final int native_reload_static();
1585
1586    private native final int native_get_native_frame_count();
1587
1588    private native final void native_setVolume(float leftVolume, float rightVolume);
1589
1590    private native final int native_set_playback_rate(int sampleRateInHz);
1591    private native final int native_get_playback_rate();
1592
1593    private native final int native_set_marker_pos(int marker);
1594    private native final int native_get_marker_pos();
1595
1596    private native final int native_set_pos_update_period(int updatePeriod);
1597    private native final int native_get_pos_update_period();
1598
1599    private native final int native_set_position(int position);
1600    private native final int native_get_position();
1601
1602    private native final int native_get_latency();
1603
1604    // longArray must be a non-null array of length >= 2
1605    // [0] is assigned the frame position
1606    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
1607    private native final int native_get_timestamp(long[] longArray);
1608
1609    private native final int native_set_loop(int start, int end, int loopCount);
1610
1611    static private native final int native_get_output_sample_rate(int streamType);
1612    static private native final int native_get_min_buff_size(
1613            int sampleRateInHz, int channelConfig, int audioFormat);
1614
1615    private native final int native_attachAuxEffect(int effectId);
1616    private native final int native_setAuxEffectSendLevel(float level);
1617
1618    //---------------------------------------------------------
1619    // Utility methods
1620    //------------------
1621
    // Logs a debug-level message under this class's TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Logs an error-level message under this class's TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
1629
1630}
1631