1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17package android.media; 18 19import java.lang.annotation.Retention; 20import java.lang.annotation.RetentionPolicy; 21import java.lang.ref.WeakReference; 22import java.lang.Math; 23import java.nio.ByteBuffer; 24import java.nio.ByteOrder; 25import java.nio.NioUtils; 26import java.util.Collection; 27 28import android.annotation.IntDef; 29import android.annotation.NonNull; 30import android.app.ActivityThread; 31import android.content.Context; 32import android.os.Handler; 33import android.os.IBinder; 34import android.os.Looper; 35import android.os.Message; 36import android.os.Process; 37import android.os.RemoteException; 38import android.os.ServiceManager; 39import android.util.ArrayMap; 40import android.util.Log; 41 42import com.android.internal.annotations.GuardedBy; 43 44/** 45 * The AudioTrack class manages and plays a single audio resource for Java applications. 46 * It allows streaming of PCM audio buffers to the audio sink for playback. This is 47 * achieved by "pushing" the data to the AudioTrack object using one of the 48 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)}, 49 * and {@link #write(float[], int, int, int)} methods. 
 *
 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
 * one of the {@code write()} methods. These are blocking and return when the data has been
 * transferred from the Java layer to the native layer and queued for playback. The streaming
 * mode is most useful when playing blocks of audio data that for instance are:
 *
 * <ul>
 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
 *   <li>too big to fit in memory because of the characteristics of the audio data
 *         (high sampling rate, bits per sample ...)</li>
 *   <li>received or generated while previously queued audio is playing.</li>
 * </ul>
 *
 * The static mode should be chosen when dealing with short sounds that fit in memory and
 * that need to be played with the smallest latency possible. The static mode will
 * therefore be preferred for UI and game sounds that are played often, and with the
 * smallest overhead possible.
 *
 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
 * The size of this buffer, specified during the construction, determines how long an AudioTrack
 * can play before running out of data.<br>
 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
 * be played from it.<br>
 * For the streaming mode, data will be written to the audio sink in chunks of
 * sizes less than or equal to the total buffer size.
 *
 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
 */
public class AudioTrack extends PlayerBase
                        implements AudioRouting
{
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Maximum value for AudioTrack channel count
     * @hide public for MediaCodec only, do not un-hide or change to a numeric literal
     */
    public static final int CHANNEL_COUNT_MAX = native_get_FCC_8();

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * Audio session ID
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;


    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // delegate to the session-aware constructor, letting the system generate a new session ID
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
    }

    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session
     * with all other media players or audio tracks in the same session, otherwise a new session
     * will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   You can write data into this buffer in smaller chunks than this size.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Bridge the legacy stream-type API onto the AudioAttributes/AudioFormat constructor.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
    }

    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        super(attributes);
        // mState already == STATE_UNINITIALIZED

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioTrack instantiation
        // (fall back to the main looper when created from a thread without one)
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // 0 tells the native layer to pick a route-dependent rate
        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        // Only trust fields the caller actually set on the AudioFormat (property set mask).
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates and stores mSampleRate/mChannelMask/mChannelIndexMask/mAudioFormat/
        // mDataLoadMode; throws IllegalArgumentException on bad values.
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // single-element arrays act as in-out parameters for native_setup: on success they
        // are overwritten with the actual sample rate and session ID chosen by the native layer
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        // A static track is not fully usable until its data has been written once.
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }

    /**
     * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
     * the AudioTrackRoutingProxy subclass.
     * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
     *   (associated with an OpenSL ES player).
     * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
     * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
     * it means that the OpenSL player interface hasn't been realized, so there is no native
     * Audiotrack to connect to. In this case wait to call deferred_connect() until the
     * OpenSLES interface is realized.
     */
    /*package*/ AudioTrack(long nativeTrackInJavaObj) {
        super(new AudioAttributes.Builder().build());
        // "final"s
        mNativeTrackInJavaObj = 0;
        mJniData = 0;

        // remember which looper is associated with the AudioTrack instantiation
        // (fall back to the main looper when created from a thread without one)
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }
        mInitializationLooper = looper;

        // other initialization...
        if (nativeTrackInJavaObj != 0) {
            // connect immediately to the already-realized native AudioTrack
            deferred_connect(nativeTrackInJavaObj);
        } else {
            // OpenSL player not realized yet; caller must invoke deferred_connect() later
            mState = STATE_UNINITIALIZED;
        }
    }

    /**
     * Connects this instance to an already created/initialized native AudioTrack.
     * No-op if this track is already initialized.
     * @hide
     */
    /* package */ void deferred_connect(long nativeTrackInJavaObj) {
        if (mState != STATE_INITIALIZED) {
            // Note that for this native_setup, we are providing an already created/initialized
            // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
            int[] session = { 0 };
            int[] rates = { 0 };
            int initResult = native_setup(new WeakReference<AudioTrack>(this),
                    null /*mAttributes - NA*/,
                    rates /*sampleRate - NA*/,
                    0 /*mChannelMask - NA*/,
                    0 /*mChannelIndexMask - NA*/,
                    0 /*mAudioFormat - NA*/,
                    0 /*mNativeBufferSizeInBytes - NA*/,
                    0 /*mDataLoadMode - NA*/,
                    session,
                    nativeTrackInJavaObj);
            if (initResult != SUCCESS) {
                loge("Error code "+initResult+" when initializing AudioTrack.");
                return; // with mState == STATE_UNINITIALIZED
            }

            mSessionId = session[0];

            mState = STATE_INITIALIZED;
        }
    }

    /**
     * Builder class for {@link AudioTrack} objects.
     * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
     * attributes and audio format parameters, you indicate which of those vary from the default
     * behavior on the device.
     * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
     * parameters, to be used by a new <code>AudioTrack</code> instance:
     *
     * <pre class="prettyprint">
     * AudioTrack player = new AudioTrack.Builder()
     *         .setAudioAttributes(new AudioAttributes.Builder()
     *                  .setUsage(AudioAttributes.USAGE_ALARM)
     *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
     *                  .build())
     *         .setAudioFormat(new AudioFormat.Builder()
     *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     *                 .setSampleRate(44100)
     *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
     *                 .build())
     *         .setBufferSizeInBytes(minBuffSize)
     *         .build();
     * </pre>
     * <p>
     * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
     * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
     * <br>If the audio format is not specified or is incomplete, its channel configuration will be
     * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
     * {@link AudioFormat#ENCODING_PCM_16BIT}.
     * The sample rate will depend on the device actually selected for playback and can be queried
     * with {@link #getSampleRate()} method.
     * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
     * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
     * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
     * <code>MODE_STREAM</code> will be used.
     * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
     * be generated.
     */
    public static class Builder {
        // Defaults below are applied lazily in build(); see the class javadoc.
        private AudioAttributes mAttributes;
        private AudioFormat mFormat;
        private int mBufferSizeInBytes;
        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM;

        /**
         * Constructs a new Builder with the default values as described above.
643 */ 644 public Builder() { 645 } 646 647 /** 648 * Sets the {@link AudioAttributes}. 649 * @param attributes a non-null {@link AudioAttributes} instance that describes the audio 650 * data to be played. 651 * @return the same Builder instance. 652 * @throws IllegalArgumentException 653 */ 654 public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes) 655 throws IllegalArgumentException { 656 if (attributes == null) { 657 throw new IllegalArgumentException("Illegal null AudioAttributes argument"); 658 } 659 // keep reference, we only copy the data when building 660 mAttributes = attributes; 661 return this; 662 } 663 664 /** 665 * Sets the format of the audio data to be played by the {@link AudioTrack}. 666 * See {@link AudioFormat.Builder} for configuring the audio format parameters such 667 * as encoding, channel mask and sample rate. 668 * @param format a non-null {@link AudioFormat} instance. 669 * @return the same Builder instance. 670 * @throws IllegalArgumentException 671 */ 672 public @NonNull Builder setAudioFormat(@NonNull AudioFormat format) 673 throws IllegalArgumentException { 674 if (format == null) { 675 throw new IllegalArgumentException("Illegal null AudioFormat argument"); 676 } 677 // keep reference, we only copy the data when building 678 mFormat = format; 679 return this; 680 } 681 682 /** 683 * Sets the total size (in bytes) of the buffer where audio data is read from for playback. 684 * If using the {@link AudioTrack} in streaming mode 685 * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller 686 * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine 687 * the estimated minimum buffer size for the creation of an AudioTrack instance 688 * in streaming mode. 689 * <br>If using the <code>AudioTrack</code> in static mode (see 690 * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be 691 * played by this instance. 
692 * @param bufferSizeInBytes 693 * @return the same Builder instance. 694 * @throws IllegalArgumentException 695 */ 696 public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes) 697 throws IllegalArgumentException { 698 if (bufferSizeInBytes <= 0) { 699 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes); 700 } 701 mBufferSizeInBytes = bufferSizeInBytes; 702 return this; 703 } 704 705 /** 706 * Sets the mode under which buffers of audio data are transferred from the 707 * {@link AudioTrack} to the framework. 708 * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}. 709 * @return the same Builder instance. 710 * @throws IllegalArgumentException 711 */ 712 public @NonNull Builder setTransferMode(@TransferMode int mode) 713 throws IllegalArgumentException { 714 switch(mode) { 715 case MODE_STREAM: 716 case MODE_STATIC: 717 mMode = mode; 718 break; 719 default: 720 throw new IllegalArgumentException("Invalid transfer mode " + mode); 721 } 722 return this; 723 } 724 725 /** 726 * Sets the session ID the {@link AudioTrack} will be attached to. 727 * @param sessionId a strictly positive ID number retrieved from another 728 * <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by 729 * {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or 730 * {@link AudioManager#AUDIO_SESSION_ID_GENERATE}. 731 * @return the same Builder instance. 732 * @throws IllegalArgumentException 733 */ 734 public @NonNull Builder setSessionId(int sessionId) 735 throws IllegalArgumentException { 736 if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) { 737 throw new IllegalArgumentException("Invalid audio session ID " + sessionId); 738 } 739 mSessionId = sessionId; 740 return this; 741 } 742 743 /** 744 * Builds an {@link AudioTrack} instance initialized with all the parameters set 745 * on this <code>Builder</code>. 
         * @return a new successfully initialized {@link AudioTrack} instance.
         * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
         *     were incompatible, or if they are not supported by the device,
         *     or if the device was not available.
         */
        public @NonNull AudioTrack build() throws UnsupportedOperationException {
            // Apply the documented defaults for anything the caller did not set.
            if (mAttributes == null) {
                mAttributes = new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .build();
            }
            if (mFormat == null) {
                mFormat = new AudioFormat.Builder()
                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                        //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
                        .setEncoding(AudioFormat.ENCODING_DEFAULT)
                        .build();
            }
            try {
                // If the buffer size is not specified in streaming mode,
                // use a single frame for the buffer size and let the
                // native code figure out the minimum buffer size.
                if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
                    mBufferSizeInBytes = mFormat.getChannelCount()
                            * mFormat.getBytesPerSample(mFormat.getEncoding());
                }
                final AudioTrack track = new AudioTrack(
                        mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId);
                if (track.getState() == STATE_UNINITIALIZED) {
                    // release is not necessary
                    throw new UnsupportedOperationException("Cannot create AudioTrack");
                }
                return track;
            } catch (IllegalArgumentException e) {
                // re-surface constructor argument problems under the contract of build()
                throw new UnsupportedOperationException(e.getMessage());
            }
        }
    }

    // mask of all the positional channels supported, however the allowed combinations
    // are further restricted by the matching left/right rule and CHANNEL_COUNT_MAX
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;

    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // (SAMPLE_RATE_UNSPECIFIED is allowed through so the route can pick the rate)
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 is based on stereo. We could coerce it to stereo.
        // But the application needs to know the stream is stereo so that
        // it is encoded and played correctly. So better to just reject it.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO) {
            throw new IllegalArgumentException(
                    "ENCODING_IEC61937 must be configured as CHANNEL_OUT_STEREO");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // a position mask and an index mask were both given but disagree on count
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        //
audio format 874 if (audioFormat == AudioFormat.ENCODING_DEFAULT) { 875 audioFormat = AudioFormat.ENCODING_PCM_16BIT; 876 } 877 878 if (!AudioFormat.isPublicEncoding(audioFormat)) { 879 throw new IllegalArgumentException("Unsupported audio encoding."); 880 } 881 mAudioFormat = audioFormat; 882 883 //-------------- 884 // audio load mode 885 if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) || 886 ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) { 887 throw new IllegalArgumentException("Invalid mode."); 888 } 889 mDataLoadMode = mode; 890 } 891 892 /** 893 * Convenience method to check that the channel configuration (a.k.a channel mask) is supported 894 * @param channelConfig the mask to validate 895 * @return false if the AudioTrack can't be used with such a mask 896 */ 897 private static boolean isMultichannelConfigSupported(int channelConfig) { 898 // check for unsupported channels 899 if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) { 900 loge("Channel configuration features unsupported channels"); 901 return false; 902 } 903 final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 904 if (channelCount > CHANNEL_COUNT_MAX) { 905 loge("Channel configuration contains too many channels " + 906 channelCount + ">" + CHANNEL_COUNT_MAX); 907 return false; 908 } 909 // check for unsupported multichannel combinations: 910 // - FL/FR must be present 911 // - L/R channels must be paired (e.g. 
no single L channel) 912 final int frontPair = 913 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT; 914 if ((channelConfig & frontPair) != frontPair) { 915 loge("Front channels must be present in multichannel configurations"); 916 return false; 917 } 918 final int backPair = 919 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT; 920 if ((channelConfig & backPair) != 0) { 921 if ((channelConfig & backPair) != backPair) { 922 loge("Rear channels can't be used independently"); 923 return false; 924 } 925 } 926 final int sidePair = 927 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 928 if ((channelConfig & sidePair) != 0 929 && (channelConfig & sidePair) != sidePair) { 930 loge("Side channels can't be used independently"); 931 return false; 932 } 933 return true; 934 } 935 936 937 // Convenience method for the constructor's audio buffer size check. 938 // preconditions: 939 // mChannelCount is valid 940 // mAudioFormat is valid 941 // postcondition: 942 // mNativeBufferSizeInBytes is valid (multiple of frame size, positive) 943 private void audioBuffSizeCheck(int audioBufferSize) { 944 // NB: this section is only valid with PCM or IEC61937 data. 945 // To update when supporting compressed formats 946 int frameSizeInBytes; 947 if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) { 948 frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat); 949 } else { 950 frameSizeInBytes = 1; 951 } 952 if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) { 953 throw new IllegalArgumentException("Invalid audio buffer size."); 954 } 955 956 mNativeBufferSizeInBytes = audioBufferSize; 957 mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes; 958 } 959 960 961 /** 962 * Releases the native AudioTrack resources. 963 */ 964 public void release() { 965 // even though native_release() stops the native AudioTrack, we need to stop 966 // AudioTrack subclasses too. 
967 try { 968 stop(); 969 } catch(IllegalStateException ise) { 970 // don't raise an exception, we're releasing the resources. 971 } 972 baseRelease(); 973 native_release(); 974 mState = STATE_UNINITIALIZED; 975 } 976 977 @Override 978 protected void finalize() { 979 baseRelease(); 980 native_finalize(); 981 } 982 983 //-------------------------------------------------------------------------- 984 // Getters 985 //-------------------- 986 /** 987 * Returns the minimum gain value, which is the constant 0.0. 988 * Gain values less than 0.0 will be clamped to 0.0. 989 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 990 * @return the minimum value, which is the constant 0.0. 991 */ 992 static public float getMinVolume() { 993 return GAIN_MIN; 994 } 995 996 /** 997 * Returns the maximum gain value, which is greater than or equal to 1.0. 998 * Gain values greater than the maximum will be clamped to the maximum. 999 * <p>The word "volume" in the API name is historical; this is actually a gain. 1000 * expressed as a linear multiplier on sample values, where a maximum value of 1.0 1001 * corresponds to a gain of 0 dB (sample values left unmodified). 1002 * @return the maximum value, which is greater than or equal to 1.0. 1003 */ 1004 static public float getMaxVolume() { 1005 return GAIN_MAX; 1006 } 1007 1008 /** 1009 * Returns the configured audio source sample rate in Hz. 1010 * The initial source sample rate depends on the constructor parameters, 1011 * but the source sample rate may change if {@link #setPlaybackRate(int)} is called. 1012 * If the constructor had a specific sample rate, then the initial sink sample rate is that 1013 * value. 1014 * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}, 1015 * then the initial sink sample rate is a route-dependent default value based on the source [sic]. 
     */
    public int getSampleRate() {
        return mSampleRate;
    }

    /**
     * Returns the current playback sample rate in Hz.
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }

    /**
     * Returns the current playback parameters.
     * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
     * @return current {@link PlaybackParams}.
     * @throws IllegalStateException if track is not initialized.
     */
    public @NonNull PlaybackParams getPlaybackParams() {
        return native_get_playback_params();
    }

    /**
     * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
     * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }

    /**
     * Returns the type of audio stream this AudioTrack is configured for.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
     */
    public int getStreamType() {
        return mStreamType;
    }

    /**
     * Returns the configured channel position mask.
     * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
     * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
     * This method may return {@link AudioFormat#CHANNEL_INVALID} if
     * a channel index mask was used. Consider
     * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
     * which contains both the channel position mask and the channel index mask.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }

    /**
     * Returns the configured <code>AudioTrack</code> format.
     * @return an {@link AudioFormat} containing the
     * <code>AudioTrack</code> parameters at the time of configuration.
     */
    public @NonNull AudioFormat getFormat() {
        AudioFormat.Builder builder = new AudioFormat.Builder()
                .setSampleRate(mSampleRate)
                .setEncoding(mAudioFormat);
        // only one of the two masks below may be set, depending on how the
        // track was configured; an unset position mask is CHANNEL_INVALID
        if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
            builder.setChannelMask(mChannelConfiguration);
        }
        if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
            builder.setChannelIndexMask(mChannelIndexMask);
        }
        return builder.build();
    }

    /**
     * Returns the configured number of channels.
     */
    public int getChannelCount() {
        return mChannelCount;
    }

    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_UNINITIALIZED
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     */
    public int getState() {
        return mState;
    }

    /**
     * Returns the playback state of the AudioTrack instance.
     * @see #PLAYSTATE_STOPPED
     * @see #PLAYSTATE_PAUSED
     * @see #PLAYSTATE_PLAYING
     */
    public int getPlayState() {
        // mPlayState is written from multiple threads; read under its lock
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }


    /**
     * Returns the effective size of the <code>AudioTrack</code> buffer
     * that the application writes to.
     * <p> This will be less than or equal to the result of
     * {@link #getBufferCapacityInFrames()}.
     * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
     * <p> If the track is subsequently routed to a different output sink, the buffer
     * size and capacity may enlarge to accommodate.
     * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
     * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
     * the size of the <code>AudioTrack</code> buffer in bytes.
     * <p> See also {@link AudioManager#getProperty(String)} for key
     * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     * @return current size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException if track is not initialized.
     */
    public int getBufferSizeInFrames() {
        return native_get_buffer_size_frames();
    }

    /**
     * Limits the effective size of the <code>AudioTrack</code> buffer
     * that the application writes to.
     * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
     * If a blocking write is used then the write will block until the data
     * can fit within this limit.
     * <p>Changing this limit modifies the latency associated with
     * the buffer for this track. A smaller size will give lower latency
     * but there may be more glitches due to buffer underruns.
     * <p>The actual size used may not be equal to this requested size.
     * It will be limited to a valid range with a maximum of
     * {@link #getBufferCapacityInFrames()}.
     * It may also be adjusted slightly for internal reasons.
     * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
     * will be returned.
     * <p>This method is only supported for PCM audio.
     * It is not supported for compressed audio tracks.
     *
     * @param bufferSizeInFrames requested buffer size in frames
     * @return the actual buffer size in frames or an error code,
     *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
     * @throws IllegalStateException if track is not initialized.
     */
    public int setBufferSizeInFrames(int bufferSizeInFrames) {
        // resizing is only meaningful for an initialized streaming track
        if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        if (bufferSizeInFrames < 0) {
            return ERROR_BAD_VALUE;
        }
        // the native layer clamps to [min, capacity] and may round internally
        return native_set_buffer_size_frames(bufferSizeInFrames);
    }

    /**
     * Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
     * <p> If the track's creation mode is {@link #MODE_STATIC},
     * it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
     * A static track's frame count will not change.
     * <p> If the track's creation mode is {@link #MODE_STREAM},
     * it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
     * For streaming tracks, this value may be rounded up to a larger value if needed by
     * the target output sink, and
     * if the track is subsequently routed to a different output sink, the
     * frame count may enlarge to accommodate.
     * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
     * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
     * the size of the <code>AudioTrack</code> buffer in bytes.
     * <p> See also {@link AudioManager#getProperty(String)} for key
     * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     * @return maximum size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException if track is not initialized.
     */
    public int getBufferCapacityInFrames() {
        return native_get_buffer_capacity_frames();
    }

    /**
     * Returns the frame count of the native <code>AudioTrack</code> buffer.
     * @return current size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException
     * @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
     */
    @Deprecated
    protected int getNativeFrameCount() {
        // NOTE(review): this returns the buffer *capacity*, i.e. it matches
        // getBufferCapacityInFrames(), while the @deprecated tag above points at
        // getBufferSizeInFrames() — confirm which replacement is intended.
        return native_get_buffer_capacity_frames();
    }

    /**
     * Returns marker position expressed in frames.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }

    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }

    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
     * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
     * the total number of frames played since reset,
     * <i>not</i> the current offset within the buffer.
     */
    public int getPlaybackHeadPosition() {
        return native_get_position();
    }

    /**
     * Returns this track's estimated latency in milliseconds. This includes the latency due
     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
     *
     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
     * a better solution.
     * @hide
     */
    public int getLatency() {
        return native_get_latency();
    }

    /**
     * Returns the number of underrun occurrences in the application-level write buffer
     * since the AudioTrack was created.
     * An underrun occurs if the application does not write audio
     * data quickly enough, causing the buffer to underflow
     * and a potential audio glitch or pop.
     * <p>
     * Underruns are less likely when buffer sizes are large.
     * It may be possible to eliminate underruns by recreating the AudioTrack with
     * a larger buffer.
     * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
     * effective size of the buffer.
     */
    public int getUnderrunCount() {
        return native_get_underrun_count();
    }

    /**
     * Returns the output sample rate in Hz for the specified stream type.
     */
    static public int getNativeOutputSampleRate(int streamType) {
        return native_get_output_sample_rate(streamType);
    }

    /**
     * Returns the estimated minimum buffer size required for an AudioTrack
     * object to be created in the {@link #MODE_STREAM} mode.
     * The size is an estimate because it does not consider either the route or the sink,
     * since neither is known yet.  Note that this size doesn't
     * guarantee a smooth playback under load, and higher values should be chosen according to
     * the expected frequency at which the buffer will be refilled with additional data to play.
     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
     * to a higher value than the initial source sample rate, be sure to configure the buffer size
     * based on the highest planned sample rate.
     * @param sampleRateInHz the source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
     * @param channelConfig describes the configuration of the audio channels.
1288 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 1289 * {@link AudioFormat#CHANNEL_OUT_STEREO} 1290 * @param audioFormat the format in which the audio data is represented. 1291 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 1292 * {@link AudioFormat#ENCODING_PCM_8BIT}, 1293 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 1294 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed, 1295 * or {@link #ERROR} if unable to query for output properties, 1296 * or the minimum buffer size expressed in bytes. 1297 */ 1298 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) { 1299 int channelCount = 0; 1300 switch(channelConfig) { 1301 case AudioFormat.CHANNEL_OUT_MONO: 1302 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1303 channelCount = 1; 1304 break; 1305 case AudioFormat.CHANNEL_OUT_STEREO: 1306 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1307 channelCount = 2; 1308 break; 1309 default: 1310 if (!isMultichannelConfigSupported(channelConfig)) { 1311 loge("getMinBufferSize(): Invalid channel configuration."); 1312 return ERROR_BAD_VALUE; 1313 } else { 1314 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1315 } 1316 } 1317 1318 if (!AudioFormat.isPublicEncoding(audioFormat)) { 1319 loge("getMinBufferSize(): Invalid audio format."); 1320 return ERROR_BAD_VALUE; 1321 } 1322 1323 // sample rate, note these values are subject to change 1324 // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed 1325 if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) || 1326 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) { 1327 loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate."); 1328 return ERROR_BAD_VALUE; 1329 } 1330 1331 int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat); 1332 if (size <= 0) { 1333 loge("getMinBufferSize(): error querying hardware"); 1334 return ERROR; 1335 } 1336 else { 1337 return size; 1338 } 1339 } 1340 1341 /** 
1342 * Returns the audio session ID. 1343 * 1344 * @return the ID of the audio session this AudioTrack belongs to. 1345 */ 1346 public int getAudioSessionId() { 1347 return mSessionId; 1348 } 1349 1350 /** 1351 * Poll for a timestamp on demand. 1352 * <p> 1353 * If you need to track timestamps during initial warmup or after a routing or mode change, 1354 * you should request a new timestamp periodically until the reported timestamps 1355 * show that the frame position is advancing, or until it becomes clear that 1356 * timestamps are unavailable for this route. 1357 * <p> 1358 * After the clock is advancing at a stable rate, 1359 * query for a new timestamp approximately once every 10 seconds to once per minute. 1360 * Calling this method more often is inefficient. 1361 * It is also counter-productive to call this method more often than recommended, 1362 * because the short-term differences between successive timestamp reports are not meaningful. 1363 * If you need a high-resolution mapping between frame position and presentation time, 1364 * consider implementing that at application level, based on low-resolution timestamps. 1365 * <p> 1366 * The audio data at the returned position may either already have been 1367 * presented, or may have not yet been presented but is committed to be presented. 1368 * It is not possible to request the time corresponding to a particular position, 1369 * or to request the (fractional) position corresponding to a particular time. 1370 * If you need such features, consider implementing them at application level. 1371 * 1372 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1373 * and owned by caller. 1374 * @return true if a timestamp is available, or false if no timestamp is available. 
1375 * If a timestamp if available, 1376 * the AudioTimestamp instance is filled in with a position in frame units, together 1377 * with the estimated time when that frame was presented or is committed to 1378 * be presented. 1379 * In the case that no timestamp is available, any supplied instance is left unaltered. 1380 * A timestamp may be temporarily unavailable while the audio clock is stabilizing, 1381 * or during and immediately after a route change. 1382 * A timestamp is permanently unavailable for a given route if the route does not support 1383 * timestamps. In this case, the approximate frame position can be obtained 1384 * using {@link #getPlaybackHeadPosition}. 1385 * However, it may be useful to continue to query for 1386 * timestamps occasionally, to recover after a route change. 1387 */ 1388 // Add this text when the "on new timestamp" API is added: 1389 // Use if you need to get the most recent timestamp outside of the event callback handler. 1390 public boolean getTimestamp(AudioTimestamp timestamp) 1391 { 1392 if (timestamp == null) { 1393 throw new IllegalArgumentException(); 1394 } 1395 // It's unfortunate, but we have to either create garbage every time or use synchronized 1396 long[] longArray = new long[2]; 1397 int ret = native_get_timestamp(longArray); 1398 if (ret != SUCCESS) { 1399 return false; 1400 } 1401 timestamp.framePosition = longArray[0]; 1402 timestamp.nanoTime = longArray[1]; 1403 return true; 1404 } 1405 1406 /** 1407 * Poll for a timestamp on demand. 1408 * <p> 1409 * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code. 1410 * 1411 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1412 * and owned by caller. 
1413 * @return {@link #SUCCESS} if a timestamp is available 1414 * {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called 1415 * immediately after start/ACTIVE, when the number of frames consumed is less than the 1416 * overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll 1417 * again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time 1418 * for the timestamp. 1419 * {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1420 * needs to be recreated. 1421 * {@link #ERROR_INVALID_OPERATION} if current route does not support 1422 * timestamps. In this case, the approximate frame position can be obtained 1423 * using {@link #getPlaybackHeadPosition}. 1424 * 1425 * The AudioTimestamp instance is filled in with a position in frame units, together 1426 * with the estimated time when that frame was presented or is committed to 1427 * be presented. 1428 * @hide 1429 */ 1430 // Add this text when the "on new timestamp" API is added: 1431 // Use if you need to get the most recent timestamp outside of the event callback handler. 1432 public int getTimestampWithStatus(AudioTimestamp timestamp) 1433 { 1434 if (timestamp == null) { 1435 throw new IllegalArgumentException(); 1436 } 1437 // It's unfortunate, but we have to either create garbage every time or use synchronized 1438 long[] longArray = new long[2]; 1439 int ret = native_get_timestamp(longArray); 1440 timestamp.framePosition = longArray[0]; 1441 timestamp.nanoTime = longArray[1]; 1442 return ret; 1443 } 1444 1445 //-------------------------------------------------------------------------- 1446 // Initialization / configuration 1447 //-------------------- 1448 /** 1449 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1450 * for each periodic playback head position update. 1451 * Notifications will be received in the same thread as the one in which the AudioTrack 1452 * instance was created. 
     * @param listener the listener to notify, or null to clear an existing listener.
     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        setPlaybackPositionUpdateListener(listener, null);
    }

    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
     * Use this method to receive AudioTrack events in the Handler associated with another
     * thread than the one in which you created the AudioTrack instance.
     * @param listener the listener to notify, or null to clear an existing listener.
     * @param handler the Handler that will receive the event notification messages.
     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
                                                    Handler handler) {
        if (listener != null) {
            mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
        } else {
            // clearing the listener also drops the delegate that dispatched to it
            mEventHandlerDelegate = null;
        }
    }


    // Clamps a linear gain to [GAIN_MIN, GAIN_MAX]; NaN is rejected rather than clamped.
    private static float clampGainOrLevel(float gainOrLevel) {
        if (Float.isNaN(gainOrLevel)) {
            throw new IllegalArgumentException();
        }
        if (gainOrLevel < GAIN_MIN) {
            gainOrLevel = GAIN_MIN;
        } else if (gainOrLevel > GAIN_MAX) {
            gainOrLevel = GAIN_MAX;
        }
        return gainOrLevel;
    }


     /**
     * Sets the specified left and right output gain values on the AudioTrack.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param leftGain output gain for the left channel.
     * @param rightGain output gain for the right channel
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     * @deprecated Applications should use {@link #setVolume} instead, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     */
    @Deprecated
    public int setStereoVolume(float leftGain, float rightGain) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }

        // clamping happens in playerSetVolume(), invoked via the base class
        baseSetVolume(leftGain, rightGain);
        return SUCCESS;
    }

    @Override
    void playerSetVolume(float leftVolume, float rightVolume) {
        leftVolume = clampGainOrLevel(leftVolume);
        rightVolume = clampGainOrLevel(rightVolume);

        native_setVolume(leftVolume, rightVolume);
    }


    /**
     * Sets the specified output gain value on all channels of this track.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>This API is preferred over {@link #setStereoVolume}, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param gain output gain for all channels.
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setVolume(float gain) {
        return setStereoVolume(gain, gain);
    }


    /**
     * Sets the playback sample rate for this track.
This sets the sampling rate at which
     * the audio data will be consumed and played back
     * (as set by the sampleRateInHz parameter in the
     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
     * not the original sampling rate of the
     * content. For example, setting it to half the sample rate of the content will cause the
     * playback to last twice as long, but will also result in a pitch shift down by one octave.
     * The valid sample rate range is from 1 Hz to twice the value returned by
     * {@link #getNativeOutputSampleRate(int)}.
     * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
     * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
     * for playback of content of differing sample rate,
     * but with identical encoding and channel mask.
     * @param sampleRateInHz the sample rate expressed in Hz
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setPlaybackRate(int sampleRateInHz) {
        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        if (sampleRateInHz <= 0) {
            return ERROR_BAD_VALUE;
        }
        // further range validation (up to 2x the native output rate) is done natively
        return native_set_playback_rate(sampleRateInHz);
    }


    /**
     * Sets the playback parameters.
     * This method returns failure if it cannot apply the playback parameters.
     * One possible cause is that the parameters for speed or pitch are out of range.
     * Another possible cause is that the <code>AudioTrack</code> is streaming
     * (see {@link #MODE_STREAM}) and the
     * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
     * on configuration must be larger than the speed multiplied by the minimum size
     * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
     * @param params see {@link PlaybackParams}. In particular,
     * speed, pitch, and audio mode should be set.
     * @throws IllegalArgumentException if the parameters are invalid or not accepted.
     * @throws IllegalStateException if track is not initialized.
     */
    public void setPlaybackParams(@NonNull PlaybackParams params) {
        // explicit check (in addition to @NonNull) so callers that ignore the
        // annotation still get the documented IllegalArgumentException
        if (params == null) {
            throw new IllegalArgumentException("params is null");
        }
        native_set_playback_params(params);
    }


    /**
     * Sets the position of the notification marker.  At most one marker can be active.
     * @param markerInFrames marker position in wrapping frame units similar to
     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
     * To set a marker at a position which would appear as zero due to wraparound,
     * a workaround is to use a non-zero position near zero, such as -1 or 1.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *  {@link #ERROR_INVALID_OPERATION}
     */
    public int setNotificationMarkerPosition(int markerInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_set_marker_pos(markerInFrames);
    }


    /**
     * Sets the period for the periodic notification event.
     * @param periodInFrames update period expressed in frames.
     * Zero period means no position updates.  A negative period is not allowed.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
     */
    public int setPositionNotificationPeriod(int periodInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_set_pos_update_period(periodInFrames);
    }


    /**
     * Sets the playback head position within the static buffer.
     * The track must be stopped or paused for the position to be changed,
     * and must use the {@link #MODE_STATIC} mode.
1629 * @param positionInFrames playback head position within buffer, expressed in frames. 1630 * Zero corresponds to start of buffer. 1631 * The position must not be greater than the buffer size in frames, or negative. 1632 * Though this method and {@link #getPlaybackHeadPosition()} have similar names, 1633 * the position values have different meanings. 1634 * <br> 1635 * If looping is currently enabled and the new position is greater than or equal to the 1636 * loop end marker, the behavior varies by API level: 1637 * as of {@link android.os.Build.VERSION_CODES#M}, 1638 * the looping is first disabled and then the position is set. 1639 * For earlier API levels, the behavior is unspecified. 1640 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1641 * {@link #ERROR_INVALID_OPERATION} 1642 */ 1643 public int setPlaybackHeadPosition(int positionInFrames) { 1644 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 1645 getPlayState() == PLAYSTATE_PLAYING) { 1646 return ERROR_INVALID_OPERATION; 1647 } 1648 if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) { 1649 return ERROR_BAD_VALUE; 1650 } 1651 return native_set_position(positionInFrames); 1652 } 1653 1654 /** 1655 * Sets the loop points and the loop count. The loop can be infinite. 1656 * Similarly to setPlaybackHeadPosition, 1657 * the track must be stopped or paused for the loop points to be changed, 1658 * and must use the {@link #MODE_STATIC} mode. 1659 * @param startInFrames loop start marker expressed in frames. 1660 * Zero corresponds to start of buffer. 1661 * The start marker must not be greater than or equal to the buffer size in frames, or negative. 1662 * @param endInFrames loop end marker expressed in frames. 1663 * The total buffer size in frames corresponds to end of buffer. 1664 * The end marker must not be greater than the buffer size in frames. 
1665 * For looping, the end marker must not be less than or equal to the start marker, 1666 * but to disable looping 1667 * it is permitted for start marker, end marker, and loop count to all be 0. 1668 * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}. 1669 * If the loop period (endInFrames - startInFrames) is too small for the implementation to 1670 * support, 1671 * {@link #ERROR_BAD_VALUE} is returned. 1672 * The loop range is the interval [startInFrames, endInFrames). 1673 * <br> 1674 * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged, 1675 * unless it is greater than or equal to the loop end marker, in which case 1676 * it is forced to the loop start marker. 1677 * For earlier API levels, the effect on position is unspecified. 1678 * @param loopCount the number of times the loop is looped; must be greater than or equal to -1. 1679 * A value of -1 means infinite looping, and 0 disables looping. 1680 * A value of positive N means to "loop" (go back) N times. For example, 1681 * a value of one means to play the region two times in total. 1682 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1683 * {@link #ERROR_INVALID_OPERATION} 1684 */ 1685 public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) { 1686 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 1687 getPlayState() == PLAYSTATE_PLAYING) { 1688 return ERROR_INVALID_OPERATION; 1689 } 1690 if (loopCount == 0) { 1691 ; // explicitly allowed as an exception to the loop region range check 1692 } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames && 1693 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) { 1694 return ERROR_BAD_VALUE; 1695 } 1696 return native_set_loop(startInFrames, endInFrames, loopCount); 1697 } 1698 1699 /** 1700 * Sets the initialization state of the instance. 
This method was originally intended to be used
     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
     * @param state the state of the AudioTrack instance
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     */
    @Deprecated
    protected void setState(int state) {
        // Directly overwrites the instance state; no validation is performed.
        mState = state;
    }


    //---------------------------------------------------------
    // Transport control methods
    //--------------------
    /**
     * Starts playing an AudioTrack.
     * <p>
     * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
     * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
     * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
     * play().
     * <p>
     * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
     * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
     * If you don't call write() first, or if you call write() but with an insufficient amount of
     * data, then the track will be in underrun state at play().  In this case,
     * playback will not actually start playing until the data path is filled to a
     * device-specific minimum level.  This requirement for the path to be filled
     * to a minimum level is also true when resuming audio playback after calling stop().
     * Similarly the buffer will need to be filled up again after
     * the track underruns due to failure to call write() in a timely manner with sufficient data.
     * For portability, an application should prime the data path to the maximum allowed
     * by writing data until the write() method returns a short transfer count.
     * This allows play() to start immediately, and reduces the chance of underrun.
     *
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void play()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
        }
        // Inform the PlayerBase superclass that playback is starting.
        baseStart();
        // Start the native track and record the play state under the shared lock.
        synchronized(mPlayStateLock) {
            native_start();
            mPlayState = PLAYSTATE_PLAYING;
        }
    }

    /**
     * Stops playing the audio data.
     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
     * after the last buffer that was written has been played. For an immediate stop, use
     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
     * back yet.
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            native_stop();
            mPlayState = PLAYSTATE_STOPPED;
            // Discard any partially written AV sync header so a later
            // timestamped write starts from a clean state.
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
        }
    }

    /**
     * Pauses the playback of the audio data. Data that has not been played
     * back will not be discarded. Subsequent calls to {@link #play} will play
     * this data back. See {@link #flush()} to discard this data.
1778 * 1779 * @throws IllegalStateException 1780 */ 1781 public void pause() 1782 throws IllegalStateException { 1783 if (mState != STATE_INITIALIZED) { 1784 throw new IllegalStateException("pause() called on uninitialized AudioTrack."); 1785 } 1786 //logd("pause()"); 1787 1788 // pause playback 1789 synchronized(mPlayStateLock) { 1790 native_pause(); 1791 mPlayState = PLAYSTATE_PAUSED; 1792 } 1793 } 1794 1795 1796 //--------------------------------------------------------- 1797 // Audio data supply 1798 //-------------------- 1799 1800 /** 1801 * Flushes the audio data currently queued for playback. Any data that has 1802 * been written but not yet presented will be discarded. No-op if not stopped or paused, 1803 * or if the track's creation mode is not {@link #MODE_STREAM}. 1804 * <BR> Note that although data written but not yet presented is discarded, there is no 1805 * guarantee that all of the buffer space formerly used by that data 1806 * is available for a subsequent write. 1807 * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code> 1808 * less than or equal to the total buffer size 1809 * may return a short actual transfer count. 1810 */ 1811 public void flush() { 1812 if (mState == STATE_INITIALIZED) { 1813 // flush the data in native layer 1814 native_flush(); 1815 mAvSyncHeader = null; 1816 mAvSyncBytesRemaining = 0; 1817 } 1818 1819 } 1820 1821 /** 1822 * Writes the audio data to the audio sink for playback (streaming mode), 1823 * or copies audio data for later playback (static buffer mode). 1824 * The format specified in the AudioTrack constructor should be 1825 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 1826 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 1827 * <p> 1828 * In streaming mode, the write will normally block until all the data has been enqueued for 1829 * playback, and will return a full transfer count. 
However, if the track is stopped or paused 1830 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1831 * occurs during the write, then the write may return a short transfer count. 1832 * <p> 1833 * In static buffer mode, copies the data to the buffer starting at offset 0. 1834 * Note that the actual playback of this data might occur after this function returns. 1835 * 1836 * @param audioData the array that holds the data to play. 1837 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 1838 * starts. 1839 * Must not be negative, or cause the data access to go out of bounds of the array. 1840 * @param sizeInBytes the number of bytes to write in audioData after the offset. 1841 * Must not be negative, or cause the data access to go out of bounds of the array. 1842 * @return zero or the positive number of bytes that were written, or one of the following 1843 * error codes. The number of bytes will be a multiple of the frame size in bytes 1844 * not to exceed sizeInBytes. 1845 * <ul> 1846 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 1847 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 1848 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1849 * needs to be recreated. The dead object error code is not returned if some data was 1850 * successfully transferred. In this case, the error is returned at the next write()</li> 1851 * <li>{@link #ERROR} in case of other error</li> 1852 * </ul> 1853 * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code> 1854 * set to {@link #WRITE_BLOCKING}. 
     */
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
        // Convenience overload: delegates to the four-argument variant with WRITE_BLOCKING.
        return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
    }

    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * The format specified in the AudioTrack constructor should be
     * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
     * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
     * <p>
     * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
     * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
     * for playback, and will return a full transfer count.  However, if the write mode is
     * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
     * interrupts the write by calling stop or pause, or an I/O error
     * occurs during the write, then the write may return a short transfer count.
     * <p>
     * In static buffer mode, copies the data to the buffer starting at offset 0,
     * and the write mode is ignored.
     * Note that the actual playback of this data might occur after this function returns.
     *
     * @param audioData the array that holds the data to play.
     * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
     *    starts.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInBytes the number of bytes to write in audioData after the offset.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
     *    effect in static mode.
1886 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1887 * to the audio sink. 1888 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1889 * queuing as much audio data for playback as possible without blocking. 1890 * @return zero or the positive number of bytes that were written, or one of the following 1891 * error codes. The number of bytes will be a multiple of the frame size in bytes 1892 * not to exceed sizeInBytes. 1893 * <ul> 1894 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 1895 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 1896 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1897 * needs to be recreated. The dead object error code is not returned if some data was 1898 * successfully transferred. In this case, the error is returned at the next write()</li> 1899 * <li>{@link #ERROR} in case of other error</li> 1900 * </ul> 1901 */ 1902 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes, 1903 @WriteMode int writeMode) { 1904 1905 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1906 return ERROR_INVALID_OPERATION; 1907 } 1908 1909 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1910 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1911 return ERROR_BAD_VALUE; 1912 } 1913 1914 if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0) 1915 || (offsetInBytes + sizeInBytes < 0) // detect integer overflow 1916 || (offsetInBytes + sizeInBytes > audioData.length)) { 1917 return ERROR_BAD_VALUE; 1918 } 1919 1920 int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat, 1921 writeMode == WRITE_BLOCKING); 1922 1923 if ((mDataLoadMode == MODE_STATIC) 1924 && (mState == STATE_NO_STATIC_DATA) 1925 && (ret > 0)) { 1926 // benign race with respect 
to other APIs that read mState 1927 mState = STATE_INITIALIZED; 1928 } 1929 1930 return ret; 1931 } 1932 1933 /** 1934 * Writes the audio data to the audio sink for playback (streaming mode), 1935 * or copies audio data for later playback (static buffer mode). 1936 * The format specified in the AudioTrack constructor should be 1937 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1938 * <p> 1939 * In streaming mode, the write will normally block until all the data has been enqueued for 1940 * playback, and will return a full transfer count. However, if the track is stopped or paused 1941 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1942 * occurs during the write, then the write may return a short transfer count. 1943 * <p> 1944 * In static buffer mode, copies the data to the buffer starting at offset 0. 1945 * Note that the actual playback of this data might occur after this function returns. 1946 * 1947 * @param audioData the array that holds the data to play. 1948 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 1949 * starts. 1950 * Must not be negative, or cause the data access to go out of bounds of the array. 1951 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1952 * Must not be negative, or cause the data access to go out of bounds of the array. 1953 * @return zero or the positive number of shorts that were written, or one of the following 1954 * error codes. The number of shorts will be a multiple of the channel count not to 1955 * exceed sizeInShorts. 1956 * <ul> 1957 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 1958 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 1959 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1960 * needs to be recreated. 
The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
     * set to {@link #WRITE_BLOCKING}.
     */
    public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
        // Convenience overload: delegates to the four-argument variant with WRITE_BLOCKING.
        return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
    }

    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * The format specified in the AudioTrack constructor should be
     * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
     * <p>
     * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
     * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
     * for playback, and will return a full transfer count.  However, if the write mode is
     * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
     * interrupts the write by calling stop or pause, or an I/O error
     * occurs during the write, then the write may return a short transfer count.
     * <p>
     * In static buffer mode, copies the data to the buffer starting at offset 0.
     * Note that the actual playback of this data might occur after this function returns.
     *
     * @param audioData the array that holds the data to write.
     * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
     *     starts.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInShorts the number of shorts to read in audioData after the offset.
1992 * Must not be negative, or cause the data access to go out of bounds of the array. 1993 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1994 * effect in static mode. 1995 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1996 * to the audio sink. 1997 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1998 * queuing as much audio data for playback as possible without blocking. 1999 * @return zero or the positive number of shorts that were written, or one of the following 2000 * error codes. The number of shorts will be a multiple of the channel count not to 2001 * exceed sizeInShorts. 2002 * <ul> 2003 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 2004 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 2005 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2006 * needs to be recreated. The dead object error code is not returned if some data was 2007 * successfully transferred. 
In this case, the error is returned at the next write()</li> 2008 * <li>{@link #ERROR} in case of other error</li> 2009 * </ul> 2010 */ 2011 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts, 2012 @WriteMode int writeMode) { 2013 2014 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 2015 return ERROR_INVALID_OPERATION; 2016 } 2017 2018 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2019 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2020 return ERROR_BAD_VALUE; 2021 } 2022 2023 if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0) 2024 || (offsetInShorts + sizeInShorts < 0) // detect integer overflow 2025 || (offsetInShorts + sizeInShorts > audioData.length)) { 2026 return ERROR_BAD_VALUE; 2027 } 2028 2029 int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat, 2030 writeMode == WRITE_BLOCKING); 2031 2032 if ((mDataLoadMode == MODE_STATIC) 2033 && (mState == STATE_NO_STATIC_DATA) 2034 && (ret > 0)) { 2035 // benign race with respect to other APIs that read mState 2036 mState = STATE_INITIALIZED; 2037 } 2038 2039 return ret; 2040 } 2041 2042 /** 2043 * Writes the audio data to the audio sink for playback (streaming mode), 2044 * or copies audio data for later playback (static buffer mode). 2045 * The format specified in the AudioTrack constructor should be 2046 * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array. 2047 * <p> 2048 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2049 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2050 * for playback, and will return a full transfer count. 
However, if the write mode is 2051 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2052 * interrupts the write by calling stop or pause, or an I/O error 2053 * occurs during the write, then the write may return a short transfer count. 2054 * <p> 2055 * In static buffer mode, copies the data to the buffer starting at offset 0, 2056 * and the write mode is ignored. 2057 * Note that the actual playback of this data might occur after this function returns. 2058 * 2059 * @param audioData the array that holds the data to write. 2060 * The implementation does not clip for sample values within the nominal range 2061 * [-1.0f, 1.0f], provided that all gains in the audio pipeline are 2062 * less than or equal to unity (1.0f), and in the absence of post-processing effects 2063 * that could add energy, such as reverb. For the convenience of applications 2064 * that compute samples using filters with non-unity gain, 2065 * sample values +3 dB beyond the nominal range are permitted. 2066 * However such values may eventually be limited or clipped, depending on various gains 2067 * and later processing in the audio path. Therefore applications are encouraged 2068 * to provide samples values within the nominal range. 2069 * @param offsetInFloats the offset, expressed as a number of floats, 2070 * in audioData where the data to write starts. 2071 * Must not be negative, or cause the data access to go out of bounds of the array. 2072 * @param sizeInFloats the number of floats to write in audioData after the offset. 2073 * Must not be negative, or cause the data access to go out of bounds of the array. 2074 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2075 * effect in static mode. 2076 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2077 * to the audio sink. 
2078 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2079 * queuing as much audio data for playback as possible without blocking. 2080 * @return zero or the positive number of floats that were written, or one of the following 2081 * error codes. The number of floats will be a multiple of the channel count not to 2082 * exceed sizeInFloats. 2083 * <ul> 2084 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 2085 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 2086 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2087 * needs to be recreated. The dead object error code is not returned if some data was 2088 * successfully transferred. In this case, the error is returned at the next write()</li> 2089 * <li>{@link #ERROR} in case of other error</li> 2090 * </ul> 2091 */ 2092 public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats, 2093 @WriteMode int writeMode) { 2094 2095 if (mState == STATE_UNINITIALIZED) { 2096 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2097 return ERROR_INVALID_OPERATION; 2098 } 2099 2100 if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) { 2101 Log.e(TAG, "AudioTrack.write(float[] ...) 
requires format ENCODING_PCM_FLOAT"); 2102 return ERROR_INVALID_OPERATION; 2103 } 2104 2105 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2106 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2107 return ERROR_BAD_VALUE; 2108 } 2109 2110 if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0) 2111 || (offsetInFloats + sizeInFloats < 0) // detect integer overflow 2112 || (offsetInFloats + sizeInFloats > audioData.length)) { 2113 Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size"); 2114 return ERROR_BAD_VALUE; 2115 } 2116 2117 int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat, 2118 writeMode == WRITE_BLOCKING); 2119 2120 if ((mDataLoadMode == MODE_STATIC) 2121 && (mState == STATE_NO_STATIC_DATA) 2122 && (ret > 0)) { 2123 // benign race with respect to other APIs that read mState 2124 mState = STATE_INITIALIZED; 2125 } 2126 2127 return ret; 2128 } 2129 2130 2131 /** 2132 * Writes the audio data to the audio sink for playback (streaming mode), 2133 * or copies audio data for later playback (static buffer mode). 2134 * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor. 2135 * <p> 2136 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2137 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2138 * for playback, and will return a full transfer count. However, if the write mode is 2139 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2140 * interrupts the write by calling stop or pause, or an I/O error 2141 * occurs during the write, then the write may return a short transfer count. 2142 * <p> 2143 * In static buffer mode, copies the data to the buffer starting at offset 0, 2144 * and the write mode is ignored. 
2145 * Note that the actual playback of this data might occur after this function returns. 2146 * 2147 * @param audioData the buffer that holds the data to write, starting at the position reported 2148 * by <code>audioData.position()</code>. 2149 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 2150 * have been advanced to reflect the amount of data that was successfully written to 2151 * the AudioTrack. 2152 * @param sizeInBytes number of bytes to write. It is recommended but not enforced 2153 * that the number of bytes requested be a multiple of the frame size (sample size in 2154 * bytes multiplied by the channel count). 2155 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 2156 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2157 * effect in static mode. 2158 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2159 * to the audio sink. 2160 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2161 * queuing as much audio data for playback as possible without blocking. 2162 * @return zero or the positive number of bytes that were written, or one of the following 2163 * error codes. 2164 * <ul> 2165 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 2166 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 2167 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2168 * needs to be recreated. The dead object error code is not returned if some data was 2169 * successfully transferred. 
In this case, the error is returned at the next write()</li> 2170 * <li>{@link #ERROR} in case of other error</li> 2171 * </ul> 2172 */ 2173 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 2174 @WriteMode int writeMode) { 2175 2176 if (mState == STATE_UNINITIALIZED) { 2177 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2178 return ERROR_INVALID_OPERATION; 2179 } 2180 2181 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2182 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2183 return ERROR_BAD_VALUE; 2184 } 2185 2186 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 2187 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 2188 return ERROR_BAD_VALUE; 2189 } 2190 2191 int ret = 0; 2192 if (audioData.isDirect()) { 2193 ret = native_write_native_bytes(audioData, 2194 audioData.position(), sizeInBytes, mAudioFormat, 2195 writeMode == WRITE_BLOCKING); 2196 } else { 2197 ret = native_write_byte(NioUtils.unsafeArray(audioData), 2198 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 2199 sizeInBytes, mAudioFormat, 2200 writeMode == WRITE_BLOCKING); 2201 } 2202 2203 if ((mDataLoadMode == MODE_STATIC) 2204 && (mState == STATE_NO_STATIC_DATA) 2205 && (ret > 0)) { 2206 // benign race with respect to other APIs that read mState 2207 mState = STATE_INITIALIZED; 2208 } 2209 2210 if (ret > 0) { 2211 audioData.position(audioData.position() + ret); 2212 } 2213 2214 return ret; 2215 } 2216 2217 /** 2218 * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track. 2219 * The blocking behavior will depend on the write mode. 2220 * @param audioData the buffer that holds the data to write, starting at the position reported 2221 * by <code>audioData.position()</code>. 
     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
     *     have been advanced to reflect the amount of data that was successfully written to
     *     the AudioTrack.
     * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
     *     that the number of bytes requested be a multiple of the frame size (sample size in
     *     bytes multiplied by the channel count).
     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *         to the audio sink.
     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
     * @return zero or the positive number of bytes that were written, or one of the following
     *    error codes.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes only make sense for streaming-mode tracks.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Without the HW_AV_SYNC flag there is nowhere to send the timestamp:
        // fall back to the plain write() and drop the pts.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists
        // 16-byte big-endian header: 0x55550001 marker, payload size (sizeInBytes), then the
        // 64-bit timestamp. mAvSyncHeader / mAvSyncBytesRemaining persist across calls, so a
        // partially delivered header or payload is resumed by subsequent write() calls.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(16);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550001);
            mAvSyncHeader.putInt(sizeInBytes);
            mAvSyncHeader.putLong(timestamp);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // Header write failed: abandon the current access unit entirely.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // In non-blocking mode the sink may accept only part of the header;
                // report zero payload bytes written and let the caller retry.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more than the payload size declared in the current header.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;
        if (mAvSyncBytesRemaining == 0) {
            // Current access unit fully delivered; the next write() starts a new header.
            mAvSyncHeader = null;
        }

        return ret;
    }


    /**
     * Sets the playback head position within the static buffer to zero,
     * that is it rewinds to start of static buffer.
     * The track must be stopped or paused, and
     * the track's creation mode must be {@link #MODE_STATIC}.
     * <p>
     * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
     * {@link #getPlaybackHeadPosition()} to zero.
     * For earlier API levels, the reset behavior is unspecified.
     * <p>
     * Use {@link #setPlaybackHeadPosition(int)} with a zero position
     * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int reloadStaticData() {
        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_reload_static();
    }

    //--------------------------------------------------------------------------
    // Audio effects management
    //--------------------

    /**
     * Attaches an auxiliary effect to the audio track. A typical auxiliary
     * effect is a reverberation effect which can be applied on any sound source
     * that directs a certain amount of its energy to this effect.
This amount 2350 * is defined by setAuxEffectSendLevel(). 2351 * {@see #setAuxEffectSendLevel(float)}. 2352 * <p>After creating an auxiliary effect (e.g. 2353 * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with 2354 * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling 2355 * this method to attach the audio track to the effect. 2356 * <p>To detach the effect from the audio track, call this method with a 2357 * null effect id. 2358 * 2359 * @param effectId system wide unique id of the effect to attach 2360 * @return error code or success, see {@link #SUCCESS}, 2361 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE} 2362 */ 2363 public int attachAuxEffect(int effectId) { 2364 if (mState == STATE_UNINITIALIZED) { 2365 return ERROR_INVALID_OPERATION; 2366 } 2367 return native_attachAuxEffect(effectId); 2368 } 2369 2370 /** 2371 * Sets the send level of the audio track to the attached auxiliary effect 2372 * {@link #attachAuxEffect(int)}. Effect levels 2373 * are clamped to the closed interval [0.0, max] where 2374 * max is the value of {@link #getMaxVolume}. 2375 * A value of 0.0 results in no effect, and a value of 1.0 is full send. 2376 * <p>By default the send level is 0.0f, so even if an effect is attached to the player 2377 * this method must be called for the effect to be applied. 2378 * <p>Note that the passed level value is a linear scalar. 
UI controls should be scaled 2379 * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB, 2380 * so an appropriate conversion from linear UI input x to level is: 2381 * x == 0 -> level = 0 2382 * 0 < x <= R -> level = 10^(72*(x-R)/20/R) 2383 * 2384 * @param level linear send level 2385 * @return error code or success, see {@link #SUCCESS}, 2386 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR} 2387 */ 2388 public int setAuxEffectSendLevel(float level) { 2389 if (mState == STATE_UNINITIALIZED) { 2390 return ERROR_INVALID_OPERATION; 2391 } 2392 return baseSetAuxEffectSendLevel(level); 2393 } 2394 2395 @Override 2396 int playerSetAuxEffectSendLevel(float level) { 2397 level = clampGainOrLevel(level); 2398 int err = native_setAuxEffectSendLevel(level); 2399 return err == 0 ? SUCCESS : ERROR; 2400 } 2401 2402 //-------------------------------------------------------------------------- 2403 // Explicit Routing 2404 //-------------------- 2405 private AudioDeviceInfo mPreferredDevice = null; 2406 2407 /** 2408 * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route 2409 * the output from this AudioTrack. 2410 * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink. 2411 * If deviceInfo is null, default routing is restored. 2412 * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and 2413 * does not correspond to a valid audio output device. 2414 */ 2415 @Override 2416 public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) { 2417 // Do some validation.... 2418 if (deviceInfo != null && !deviceInfo.isSink()) { 2419 return false; 2420 } 2421 int preferredDeviceId = deviceInfo != null ? 
deviceInfo.getId() : 0; 2422 boolean status = native_setOutputDevice(preferredDeviceId); 2423 if (status == true) { 2424 synchronized (this) { 2425 mPreferredDevice = deviceInfo; 2426 } 2427 } 2428 return status; 2429 } 2430 2431 /** 2432 * Returns the selected output specified by {@link #setPreferredDevice}. Note that this 2433 * is not guaranteed to correspond to the actual device being used for playback. 2434 */ 2435 @Override 2436 public AudioDeviceInfo getPreferredDevice() { 2437 synchronized (this) { 2438 return mPreferredDevice; 2439 } 2440 } 2441 2442 /** 2443 * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack. 2444 * Note: The query is only valid if the AudioTrack is currently playing. If it is not, 2445 * <code>getRoutedDevice()</code> will return null. 2446 */ 2447 @Override 2448 public AudioDeviceInfo getRoutedDevice() { 2449 int deviceId = native_getRoutedDeviceId(); 2450 if (deviceId == 0) { 2451 return null; 2452 } 2453 AudioDeviceInfo[] devices = 2454 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS); 2455 for (int i = 0; i < devices.length; i++) { 2456 if (devices[i].getId() == deviceId) { 2457 return devices[i]; 2458 } 2459 } 2460 return null; 2461 } 2462 2463 /* 2464 * Call BEFORE adding a routing callback handler. 2465 */ 2466 private void testEnableNativeRoutingCallbacksLocked() { 2467 if (mRoutingChangeListeners.size() == 0) { 2468 native_enableDeviceCallback(); 2469 } 2470 } 2471 2472 /* 2473 * Call AFTER removing a routing callback handler. 
2474 */ 2475 private void testDisableNativeRoutingCallbacksLocked() { 2476 if (mRoutingChangeListeners.size() == 0) { 2477 native_disableDeviceCallback(); 2478 } 2479 } 2480 2481 //-------------------------------------------------------------------------- 2482 // (Re)Routing Info 2483 //-------------------- 2484 /** 2485 * The list of AudioRouting.OnRoutingChangedListener interfaces added (with 2486 * {@link AudioRecord#addOnRoutingChangedListener} by an app to receive 2487 * (re)routing notifications. 2488 */ 2489 @GuardedBy("mRoutingChangeListeners") 2490 private ArrayMap<AudioRouting.OnRoutingChangedListener, 2491 NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>(); 2492 2493 /** 2494 * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing 2495 * changes on this AudioTrack. 2496 * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive 2497 * notifications of rerouting events. 2498 * @param handler Specifies the {@link Handler} object for the thread on which to execute 2499 * the callback. If <code>null</code>, the {@link Handler} associated with the main 2500 * {@link Looper} will be used. 2501 */ 2502 @Override 2503 public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, 2504 Handler handler) { 2505 synchronized (mRoutingChangeListeners) { 2506 if (listener != null && !mRoutingChangeListeners.containsKey(listener)) { 2507 testEnableNativeRoutingCallbacksLocked(); 2508 mRoutingChangeListeners.put( 2509 listener, new NativeRoutingEventHandlerDelegate(this, listener, 2510 handler != null ? handler : new Handler(mInitializationLooper))); 2511 } 2512 } 2513 } 2514 2515 /** 2516 * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added 2517 * to receive rerouting notifications. 2518 * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface 2519 * to remove. 
2520 */ 2521 @Override 2522 public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) { 2523 synchronized (mRoutingChangeListeners) { 2524 if (mRoutingChangeListeners.containsKey(listener)) { 2525 mRoutingChangeListeners.remove(listener); 2526 } 2527 testDisableNativeRoutingCallbacksLocked(); 2528 } 2529 } 2530 2531 //-------------------------------------------------------------------------- 2532 // (Re)Routing Info 2533 //-------------------- 2534 /** 2535 * Defines the interface by which applications can receive notifications of 2536 * routing changes for the associated {@link AudioTrack}. 2537 * 2538 * @deprecated users should switch to the general purpose 2539 * {@link AudioRouting.OnRoutingChangedListener} class instead. 2540 */ 2541 @Deprecated 2542 public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener { 2543 /** 2544 * Called when the routing of an AudioTrack changes from either and 2545 * explicit or policy rerouting. Use {@link #getRoutedDevice()} to 2546 * retrieve the newly routed-to device. 2547 */ 2548 public void onRoutingChanged(AudioTrack audioTrack); 2549 2550 @Override 2551 default public void onRoutingChanged(AudioRouting router) { 2552 if (router instanceof AudioTrack) { 2553 onRoutingChanged((AudioTrack) router); 2554 } 2555 } 2556 } 2557 2558 /** 2559 * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes 2560 * on this AudioTrack. 2561 * @param listener The {@link OnRoutingChangedListener} interface to receive notifications 2562 * of rerouting events. 2563 * @param handler Specifies the {@link Handler} object for the thread on which to execute 2564 * the callback. If <code>null</code>, the {@link Handler} associated with the main 2565 * {@link Looper} will be used. 2566 * @deprecated users should switch to the general purpose 2567 * {@link AudioRouting.OnRoutingChangedListener} class instead. 
2568 */ 2569 @Deprecated 2570 public void addOnRoutingChangedListener(OnRoutingChangedListener listener, 2571 android.os.Handler handler) { 2572 addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler); 2573 } 2574 2575 /** 2576 * Removes an {@link OnRoutingChangedListener} which has been previously added 2577 * to receive rerouting notifications. 2578 * @param listener The previously added {@link OnRoutingChangedListener} interface to remove. 2579 * @deprecated users should switch to the general purpose 2580 * {@link AudioRouting.OnRoutingChangedListener} class instead. 2581 */ 2582 @Deprecated 2583 public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) { 2584 removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener); 2585 } 2586 2587 /** 2588 * Sends device list change notification to all listeners. 2589 */ 2590 private void broadcastRoutingChange() { 2591 AudioManager.resetAudioPortGeneration(); 2592 synchronized (mRoutingChangeListeners) { 2593 for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) { 2594 Handler handler = delegate.getHandler(); 2595 if (handler != null) { 2596 handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE); 2597 } 2598 } 2599 } 2600 } 2601 2602 //--------------------------------------------------------- 2603 // Interface definitions 2604 //-------------------- 2605 /** 2606 * Interface definition for a callback to be invoked when the playback head position of 2607 * an AudioTrack has reached a notification marker or has increased by a certain period. 2608 */ 2609 public interface OnPlaybackPositionUpdateListener { 2610 /** 2611 * Called on the listener to notify it that the previously set marker has been reached 2612 * by the playback head. 
2613 */ 2614 void onMarkerReached(AudioTrack track); 2615 2616 /** 2617 * Called on the listener to periodically notify it that the playback head has reached 2618 * a multiple of the notification period. 2619 */ 2620 void onPeriodicNotification(AudioTrack track); 2621 } 2622 2623 //--------------------------------------------------------- 2624 // Inner classes 2625 //-------------------- 2626 /** 2627 * Helper class to handle the forwarding of native events to the appropriate listener 2628 * (potentially) handled in a different thread 2629 */ 2630 private class NativePositionEventHandlerDelegate { 2631 private final Handler mHandler; 2632 2633 NativePositionEventHandlerDelegate(final AudioTrack track, 2634 final OnPlaybackPositionUpdateListener listener, 2635 Handler handler) { 2636 // find the looper for our new event handler 2637 Looper looper; 2638 if (handler != null) { 2639 looper = handler.getLooper(); 2640 } else { 2641 // no given handler, use the looper the AudioTrack was created in 2642 looper = mInitializationLooper; 2643 } 2644 2645 // construct the event handler with this looper 2646 if (looper != null) { 2647 // implement the event handler delegate 2648 mHandler = new Handler(looper) { 2649 @Override 2650 public void handleMessage(Message msg) { 2651 if (track == null) { 2652 return; 2653 } 2654 switch(msg.what) { 2655 case NATIVE_EVENT_MARKER: 2656 if (listener != null) { 2657 listener.onMarkerReached(track); 2658 } 2659 break; 2660 case NATIVE_EVENT_NEW_POS: 2661 if (listener != null) { 2662 listener.onPeriodicNotification(track); 2663 } 2664 break; 2665 default: 2666 loge("Unknown native event type: " + msg.what); 2667 break; 2668 } 2669 } 2670 }; 2671 } else { 2672 mHandler = null; 2673 } 2674 } 2675 2676 Handler getHandler() { 2677 return mHandler; 2678 } 2679 } 2680 2681 /** 2682 * Helper class to handle the forwarding of native events to the appropriate listener 2683 * (potentially) handled in a different thread 2684 */ 2685 private class 
NativeRoutingEventHandlerDelegate { 2686 private final Handler mHandler; 2687 2688 NativeRoutingEventHandlerDelegate(final AudioTrack track, 2689 final AudioRouting.OnRoutingChangedListener listener, 2690 Handler handler) { 2691 // find the looper for our new event handler 2692 Looper looper; 2693 if (handler != null) { 2694 looper = handler.getLooper(); 2695 } else { 2696 // no given handler, use the looper the AudioTrack was created in 2697 looper = mInitializationLooper; 2698 } 2699 2700 // construct the event handler with this looper 2701 if (looper != null) { 2702 // implement the event handler delegate 2703 mHandler = new Handler(looper) { 2704 @Override 2705 public void handleMessage(Message msg) { 2706 if (track == null) { 2707 return; 2708 } 2709 switch(msg.what) { 2710 case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE: 2711 if (listener != null) { 2712 listener.onRoutingChanged(track); 2713 } 2714 break; 2715 default: 2716 loge("Unknown native event type: " + msg.what); 2717 break; 2718 } 2719 } 2720 }; 2721 } else { 2722 mHandler = null; 2723 } 2724 } 2725 2726 Handler getHandler() { 2727 return mHandler; 2728 } 2729 } 2730 2731 //--------------------------------------------------------- 2732 // Java methods called from the native side 2733 //-------------------- 2734 @SuppressWarnings("unused") 2735 private static void postEventFromNative(Object audiotrack_ref, 2736 int what, int arg1, int arg2, Object obj) { 2737 //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2); 2738 AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get(); 2739 if (track == null) { 2740 return; 2741 } 2742 2743 if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) { 2744 track.broadcastRoutingChange(); 2745 return; 2746 } 2747 NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate; 2748 if (delegate != null) { 2749 Handler handler = delegate.getHandler(); 2750 if (handler != null) { 2751 Message m = 
handler.obtainMessage(what, arg1, arg2, obj); 2752 handler.sendMessage(m); 2753 } 2754 } 2755 } 2756 2757 2758 //--------------------------------------------------------- 2759 // Native methods called from the Java side 2760 //-------------------- 2761 2762 // post-condition: mStreamType is overwritten with a value 2763 // that reflects the audio attributes (e.g. an AudioAttributes object with a usage of 2764 // AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC 2765 private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this, 2766 Object /*AudioAttributes*/ attributes, 2767 int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat, 2768 int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack); 2769 2770 private native final void native_finalize(); 2771 2772 /** 2773 * @hide 2774 */ 2775 public native final void native_release(); 2776 2777 private native final void native_start(); 2778 2779 private native final void native_stop(); 2780 2781 private native final void native_pause(); 2782 2783 private native final void native_flush(); 2784 2785 private native final int native_write_byte(byte[] audioData, 2786 int offsetInBytes, int sizeInBytes, int format, 2787 boolean isBlocking); 2788 2789 private native final int native_write_short(short[] audioData, 2790 int offsetInShorts, int sizeInShorts, int format, 2791 boolean isBlocking); 2792 2793 private native final int native_write_float(float[] audioData, 2794 int offsetInFloats, int sizeInFloats, int format, 2795 boolean isBlocking); 2796 2797 private native final int native_write_native_bytes(Object audioData, 2798 int positionInBytes, int sizeInBytes, int format, boolean blocking); 2799 2800 private native final int native_reload_static(); 2801 2802 private native final int native_get_buffer_size_frames(); 2803 private native final int native_set_buffer_size_frames(int bufferSizeInFrames); 2804 private native final int 
            native_get_buffer_capacity_frames();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();
    static private native int native_get_FCC_8();


    //---------------------------------------------------------
    // Utility methods
    //------------------

    // Debug-level log helper using the class tag.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Error-level log helper using the class tag.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
}