AudioTrack.java revision 7922be86e29002dc3714824b2b669a9760de0528
1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17package android.media; 18 19import java.lang.annotation.Retention; 20import java.lang.annotation.RetentionPolicy; 21import java.lang.ref.WeakReference; 22import java.lang.Math; 23import java.nio.ByteBuffer; 24import java.nio.ByteOrder; 25import java.nio.NioUtils; 26import java.util.Collection; 27 28import android.annotation.IntDef; 29import android.annotation.NonNull; 30import android.app.ActivityThread; 31import android.app.AppOpsManager; 32import android.content.Context; 33import android.os.Handler; 34import android.os.IBinder; 35import android.os.Looper; 36import android.os.Message; 37import android.os.Process; 38import android.os.RemoteException; 39import android.os.ServiceManager; 40import android.util.ArrayMap; 41import android.util.Log; 42 43import com.android.internal.app.IAppOpsService; 44 45 46/** 47 * The AudioTrack class manages and plays a single audio resource for Java applications. 48 * It allows streaming of PCM audio buffers to the audio sink for playback. This is 49 * achieved by "pushing" the data to the AudioTrack object using one of the 50 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)}, 51 * and {@link #write(float[], int, int, int)} methods. 
52 * 53 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br> 54 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using 55 * one of the {@code write()} methods. These are blocking and return when the data has been 56 * transferred from the Java layer to the native layer and queued for playback. The streaming 57 * mode is most useful when playing blocks of audio data that for instance are: 58 * 59 * <ul> 60 * <li>too big to fit in memory because of the duration of the sound to play,</li> 61 * <li>too big to fit in memory because of the characteristics of the audio data 62 * (high sampling rate, bits per sample ...)</li> 63 * <li>received or generated while previously queued audio is playing.</li> 64 * </ul> 65 * 66 * The static mode should be chosen when dealing with short sounds that fit in memory and 67 * that need to be played with the smallest latency possible. The static mode will 68 * therefore be preferred for UI and game sounds that are played often, and with the 69 * smallest overhead possible. 70 * 71 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer. 72 * The size of this buffer, specified during the construction, determines how long an AudioTrack 73 * can play before running out of data.<br> 74 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can 75 * be played from it.<br> 76 * For the streaming mode, data will be written to the audio sink in chunks of 77 * sizes less than or equal to the total buffer size. 78 * 79 * AudioTrack is not final and thus permits subclasses, but such use is not recommended. 80 */ 81public class AudioTrack 82{ 83 //--------------------------------------------------------- 84 // Constants 85 //-------------------- 86 /** Minimum value for a linear gain or auxiliary effect level. 87 * This value must be exactly equal to 0.0f; do not change it. 
88 */ 89 private static final float GAIN_MIN = 0.0f; 90 /** Maximum value for a linear gain or auxiliary effect level. 91 * This value must be greater than or equal to 1.0f. 92 */ 93 private static final float GAIN_MAX = 1.0f; 94 95 /** Minimum value for sample rate */ 96 private static final int SAMPLE_RATE_HZ_MIN = 4000; 97 /** Maximum value for sample rate */ 98 private static final int SAMPLE_RATE_HZ_MAX = 192000; 99 100 /** Maximum value for AudioTrack channel count 101 * @hide public for MediaCode only, do not un-hide or change to a numeric literal 102 */ 103 public static final int CHANNEL_COUNT_MAX = 8; // FIXME was native_get_FCC_8(), unregistered! 104 105 /** indicates AudioTrack state is stopped */ 106 public static final int PLAYSTATE_STOPPED = 1; // matches SL_PLAYSTATE_STOPPED 107 /** indicates AudioTrack state is paused */ 108 public static final int PLAYSTATE_PAUSED = 2; // matches SL_PLAYSTATE_PAUSED 109 /** indicates AudioTrack state is playing */ 110 public static final int PLAYSTATE_PLAYING = 3; // matches SL_PLAYSTATE_PLAYING 111 112 // keep these values in sync with android_media_AudioTrack.cpp 113 /** 114 * Creation mode where audio data is transferred from Java to the native layer 115 * only once before the audio starts playing. 116 */ 117 public static final int MODE_STATIC = 0; 118 /** 119 * Creation mode where audio data is streamed from Java to the native layer 120 * as the audio is playing. 121 */ 122 public static final int MODE_STREAM = 1; 123 124 /** @hide */ 125 @IntDef({ 126 MODE_STATIC, 127 MODE_STREAM 128 }) 129 @Retention(RetentionPolicy.SOURCE) 130 public @interface TransferMode {} 131 132 /** 133 * State of an AudioTrack that was not successfully initialized upon creation. 134 */ 135 public static final int STATE_UNINITIALIZED = 0; 136 /** 137 * State of an AudioTrack that is ready to be used. 
138 */ 139 public static final int STATE_INITIALIZED = 1; 140 /** 141 * State of a successfully initialized AudioTrack that uses static data, 142 * but that hasn't received that data yet. 143 */ 144 public static final int STATE_NO_STATIC_DATA = 2; 145 146 /** 147 * Denotes a successful operation. 148 */ 149 public static final int SUCCESS = AudioSystem.SUCCESS; 150 /** 151 * Denotes a generic operation failure. 152 */ 153 public static final int ERROR = AudioSystem.ERROR; 154 /** 155 * Denotes a failure due to the use of an invalid value. 156 */ 157 public static final int ERROR_BAD_VALUE = AudioSystem.BAD_VALUE; 158 /** 159 * Denotes a failure due to the improper use of a method. 160 */ 161 public static final int ERROR_INVALID_OPERATION = AudioSystem.INVALID_OPERATION; 162 /** 163 * An error code indicating that the object reporting it is no longer valid and needs to 164 * be recreated. 165 * @hide 166 */ 167 public static final int ERROR_DEAD_OBJECT = AudioSystem.DEAD_OBJECT; 168 /** 169 * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state, 170 * or immediately after start/ACTIVE. 171 * @hide 172 */ 173 public static final int ERROR_WOULD_BLOCK = AudioSystem.WOULD_BLOCK; 174 175 // Error codes: 176 // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp 177 private static final int ERROR_NATIVESETUP_AUDIOSYSTEM = -16; 178 private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK = -17; 179 private static final int ERROR_NATIVESETUP_INVALIDFORMAT = -18; 180 private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE = -19; 181 private static final int ERROR_NATIVESETUP_NATIVEINITFAILED = -20; 182 183 // Events: 184 // to keep in sync with frameworks/av/include/media/AudioTrack.h 185 /** 186 * Event id denotes when playback head has reached a previously set marker. 
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
235 */ 236 private int mPlayState = PLAYSTATE_STOPPED; 237 /** 238 * Lock to ensure mPlayState updates reflect the actual state of the object. 239 */ 240 private final Object mPlayStateLock = new Object(); 241 /** 242 * Sizes of the native audio buffer. 243 * These values are set during construction and can be stale. 244 * To obtain the current native audio buffer frame count use {@link #getBufferSizeInFrames()}. 245 */ 246 private int mNativeBufferSizeInBytes = 0; 247 private int mNativeBufferSizeInFrames = 0; 248 /** 249 * Handler for events coming from the native code. 250 */ 251 private NativePositionEventHandlerDelegate mEventHandlerDelegate; 252 /** 253 * Looper associated with the thread that creates the AudioTrack instance. 254 */ 255 private final Looper mInitializationLooper; 256 /** 257 * The audio data source sampling rate in Hz. 258 */ 259 private int mSampleRate; // initialized by all constructors via audioParamCheck() 260 /** 261 * The number of audio output channels (1 is mono, 2 is stereo, etc.). 262 */ 263 private int mChannelCount = 1; 264 /** 265 * The audio channel mask used for calling native AudioTrack 266 */ 267 private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO; 268 269 /** 270 * The type of the audio stream to play. See 271 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 272 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 273 * {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and 274 * {@link AudioManager#STREAM_DTMF}. 275 */ 276 private int mStreamType = AudioManager.STREAM_MUSIC; 277 278 private final AudioAttributes mAttributes; 279 /** 280 * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM. 281 */ 282 private int mDataLoadMode = MODE_STREAM; 283 /** 284 * The current channel position mask, as specified on AudioTrack creation. 285 * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}. 
286 * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified. 287 */ 288 private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO; 289 /** 290 * The channel index mask if specified, otherwise 0. 291 */ 292 private int mChannelIndexMask = 0; 293 /** 294 * The encoding of the audio samples. 295 * @see AudioFormat#ENCODING_PCM_8BIT 296 * @see AudioFormat#ENCODING_PCM_16BIT 297 * @see AudioFormat#ENCODING_PCM_FLOAT 298 */ 299 private int mAudioFormat; // initialized by all constructors via audioParamCheck() 300 /** 301 * Audio session ID 302 */ 303 private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE; 304 /** 305 * Reference to the app-ops service. 306 */ 307 private final IAppOpsService mAppOps; 308 /** 309 * HW_AV_SYNC track AV Sync Header 310 */ 311 private ByteBuffer mAvSyncHeader = null; 312 /** 313 * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header 314 */ 315 private int mAvSyncBytesRemaining = 0; 316 317 //-------------------------------- 318 // Used exclusively by native code 319 //-------------------- 320 /** 321 * Accessed by native methods: provides access to C++ AudioTrack object. 322 */ 323 @SuppressWarnings("unused") 324 private long mNativeTrackInJavaObj; 325 /** 326 * Accessed by native methods: provides access to the JNI data (i.e. resources used by 327 * the native AudioTrack object, but not stored in it). 328 */ 329 @SuppressWarnings("unused") 330 private long mJniData; 331 332 333 //-------------------------------------------------------------------------- 334 // Constructor, Finalize 335 //-------------------- 336 /** 337 * Class constructor. 338 * @param streamType the type of the audio stream. See 339 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 340 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 341 * {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}. 
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   natural latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is automatically increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the native buffer created, which
     *   determines the frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // delegates to the session-aware constructor, requesting allocation of a new session
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
    }

    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session
     * with all other media players or audio tracks in the same session, otherwise a new session
     * will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Adapts the legacy stream-type API onto the AudioAttributes/AudioFormat constructor.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
    }

    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *     from for playback. If using the AudioTrack in streaming mode, you can write data into
     *     this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *     this is the maximum size of the sound that will be played for this instance.
     *     See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *     for the successful creation of an AudioTrack instance in streaming mode. Using values
     *     smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *     time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *     construction.
     * @throws IllegalArgumentException
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes");
        }
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // Fall back to the device's primary output sample rate (last resort: 44100 Hz)
        // when the AudioFormat did not explicitly set one.
        int rate = 0;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
        {
            rate = format.getSampleRate();
        } else {
            rate = AudioSystem.getPrimaryOutputSamplingRate();
            if (rate <= 0) {
                rate = 44100;
            }
        }
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates and stores mSampleRate/mChannelMask/mChannelIndexMask/mAudioFormat/
        // mDataLoadMode; these fields are read by native_setup() below.
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;
        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        mAttributes = new AudioAttributes.Builder(attributes).build();

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // session[] is in/out: native_setup reads the requested ID and writes back the
        // session actually assigned by the native layer.
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                mSampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSessionId = session[0];

        // A static track is not fully usable until its data has been written;
        // see STATE_NO_STATIC_DATA.
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }

    /**
     * Builder class for {@link AudioTrack} objects.
     * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
     * attributes and audio format parameters, you indicate which of those vary from the default
     * behavior on the device.
     * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
     * parameters, to be used by a new <code>AudioTrack</code> instance:
     *
     * <pre class="prettyprint">
     * AudioTrack player = new AudioTrack.Builder()
     *         .setAudioAttributes(new AudioAttributes.Builder()
     *                  .setUsage(AudioAttributes.USAGE_ALARM)
     *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
     *                  .build())
     *         .setAudioFormat(new AudioFormat.Builder()
     *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     *                 .setSampleRate(44100)
     *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
     *                 .build())
     *         .setBufferSizeInBytes(minBuffSize)
     *         .build();
     * </pre>
     * <p>
     * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
     * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
     * <br>If the audio format is not specified or is incomplete, its sample rate will be the
     * default output sample rate of the device (see
     * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be
     * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
     * {@link AudioFormat#ENCODING_PCM_16BIT}.
     * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
     * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
     * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
     * <code>MODE_STREAM</code> will be used.
     * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
     * be generated.
     */
    public static class Builder {
        private AudioAttributes mAttributes;
        private AudioFormat mFormat;
        private int mBufferSizeInBytes;
        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM;

        /**
         * Constructs a new Builder with the default values as described above.
         */
        public Builder() {
        }

        /**
         * Sets the {@link AudioAttributes}.
         * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
         *     data to be played.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
                throws IllegalArgumentException {
            if (attributes == null) {
                throw new IllegalArgumentException("Illegal null AudioAttributes argument");
            }
            // keep reference, we only copy the data when building
            mAttributes = attributes;
            return this;
        }

        /**
         * Sets the format of the audio data to be played by the {@link AudioTrack}.
         * See {@link AudioFormat.Builder} for configuring the audio format parameters such
         * as encoding, channel mask and sample rate.
         * @param format a non-null {@link AudioFormat} instance.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
                throws IllegalArgumentException {
            if (format == null) {
                throw new IllegalArgumentException("Illegal null AudioFormat argument");
            }
            // keep reference, we only copy the data when building
            mFormat = format;
            return this;
        }

        /**
         * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
         * If using the {@link AudioTrack} in streaming mode
         * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
         * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
         * the minimum required buffer size for the successful creation of an AudioTrack instance
         * in streaming mode. Using values smaller than <code>getMinBufferSize()</code> will result
         * in an exception when trying to build the <code>AudioTrack</code>.
         * <br>If using the <code>AudioTrack</code> in static mode (see
         * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
         * played by this instance.
         * @param bufferSizeInBytes
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
                throws IllegalArgumentException {
            if (bufferSizeInBytes <= 0) {
                throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
            }
            mBufferSizeInBytes = bufferSizeInBytes;
            return this;
        }

        /**
         * Sets the mode under which buffers of audio data are transferred from the
         * {@link AudioTrack} to the framework.
         * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setTransferMode(@TransferMode int mode)
                throws IllegalArgumentException {
            switch(mode) {
            case MODE_STREAM:
            case MODE_STATIC:
                mMode = mode;
                break;
            default:
                throw new IllegalArgumentException("Invalid transfer mode " + mode);
            }
            return this;
        }

        /**
         * Sets the session ID the {@link AudioTrack} will be attached to.
         * @param sessionId a strictly positive ID number retrieved from another
         *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
         *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
         *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setSessionId(int sessionId)
                throws IllegalArgumentException {
            if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
                throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
            }
            mSessionId = sessionId;
            return this;
        }

        /**
         * Builds an {@link AudioTrack} instance initialized with all the parameters set
         * on this <code>Builder</code>.
         * @return a new successfully initialized {@link AudioTrack} instance.
         * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
         *     were incompatible, or if they are not supported by the device,
         *     or if the device was not available.
         */
        public @NonNull AudioTrack build() throws UnsupportedOperationException {
            if (mAttributes == null) {
                mAttributes = new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .build();
            }
            if (mFormat == null) {
                mFormat = new AudioFormat.Builder()
                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                        .setSampleRate(AudioSystem.getPrimaryOutputSamplingRate())
                        .setEncoding(AudioFormat.ENCODING_DEFAULT)
                        .build();
            }
            try {
                // If the buffer size is not specified in streaming mode,
                // use a single frame for the buffer size and let the
                // native code figure out the minimum buffer size.
                if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
                    mBufferSizeInBytes = mFormat.getChannelCount()
                            * mFormat.getBytesPerSample(mFormat.getEncoding());
                }
                final AudioTrack track = new AudioTrack(
                        mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId);
                if (track.getState() == STATE_UNINITIALIZED) {
                    // release is not necessary
                    throw new UnsupportedOperationException("Cannot create AudioTrack");
                }
                return track;
            } catch (IllegalArgumentException e) {
                throw new UnsupportedOperationException(e.getMessage());
            }
        }
    }

    // mask of all the positional channels supported, however the allowed combinations
    // are further restricted by the matching left/right rule and CHANNEL_COUNT_MAX
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;

    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
            int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        if (sampleRateInHz < SAMPLE_RATE_HZ_MIN || sampleRateInHz > SAMPLE_RATE_HZ_MAX) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            // CHANNEL_INVALID combined with a non-zero index mask means the caller is
            // using a channel index configuration only; the count is derived from the
            // index mask further below.
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // a position mask was also supplied; the two descriptions must agree
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // NOTE: once the first clause passes, (mode != MODE_STREAM) implies
        // mode == MODE_STATIC, so the second clause enforces that static mode
        // requires a linear PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }

    /**
     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
     * @param channelConfig the mask to validate
     * @return false if the AudioTrack can't be used with such a mask
     */
    private static boolean isMultichannelConfigSupported(int channelConfig) {
        // check for unsupported channels
        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
            loge("Channel configuration features unsupported channels");
            return false;
        }
        final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        if (channelCount > CHANNEL_COUNT_MAX) {
            loge("Channel configuration contains too many channels " +
                    channelCount + ">" + CHANNEL_COUNT_MAX);
            return false;
        }
        // check for unsupported multichannel combinations:
        // - FL/FR must be present
        // - L/R channels must be paired (e.g. no single L channel)
        final int frontPair =
                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        if ((channelConfig & frontPair) != frontPair) {
            loge("Front channels must be present in multichannel configurations");
            return false;
        }
        final int backPair =
                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
        if ((channelConfig & backPair) != 0) {
            if ((channelConfig & backPair) != backPair) {
                loge("Rear channels can't be used independently");
                return false;
            }
        }
        final int sidePair =
                AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
        if ((channelConfig & sidePair) != 0
                && (channelConfig & sidePair) != sidePair) {
            loge("Side channels can't be used independently");
            return false;
        }
        return true;
    }


    // Convenience method for the constructor's audio buffer size check.
    // preconditions:
    //    mChannelCount is valid
    //    mAudioFormat is valid
    // postcondition:
    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
    private void audioBuffSizeCheck(int audioBufferSize) {
        // NB: this section is only valid with PCM data.
        //     To update when supporting compressed formats
        int frameSizeInBytes;
        if (AudioFormat.isEncodingLinearPcm(mAudioFormat)) {
            frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
        } else {
            // compressed formats are treated as a byte stream (frame size of one byte)
            frameSizeInBytes = 1;
        }
        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
            throw new IllegalArgumentException("Invalid audio buffer size.");
        }

        mNativeBufferSizeInBytes = audioBufferSize;
        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
    }


    /**
     * Releases the native AudioTrack resources.
     */
    public void release() {
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch(IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        native_release();
        mState = STATE_UNINITIALIZED;
    }

    @Override
    protected void finalize() {
        // last-resort cleanup of the native resources; release() remains the proper
        // way to free them deterministically.
        native_finalize();
    }

    //--------------------------------------------------------------------------
    // Getters
    //--------------------
    /**
     * Returns the minimum gain value, which is the constant 0.0.
     * Gain values less than 0.0 will be clamped to 0.0.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @return the minimum value, which is the constant 0.0.
     */
    static public float getMinVolume() {
        return GAIN_MIN;
    }

    /**
     * Returns the maximum gain value, which is greater than or equal to 1.0.
     * Gain values greater than the maximum will be clamped to the maximum.
     * <p>The word "volume" in the API name is historical; this is actually a gain,
     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
     * corresponds to a gain of 0 dB (sample values left unmodified).
     * @return the maximum value, which is greater than or equal to 1.0.
     */
    static public float getMaxVolume() {
        return GAIN_MAX;
    }

    /**
     * Returns the configured audio data sample rate in Hz
     */
    public int getSampleRate() {
        return mSampleRate;
    }

    /**
     * Returns the current playback sample rate in Hz.
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }

    /**
     * Returns the current playback parameters.
     * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
     * @return current {@link PlaybackParams}.
     * @throws IllegalStateException if track is not initialized.
     */
    public @NonNull PlaybackParams getPlaybackParams() {
        return native_get_playback_params();
    }

    /**
     * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
     * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     */
    public int getAudioFormat() {
        // value cached at construction time by audioParamCheck()
        return mAudioFormat;
    }

    /**
     * Returns the type of audio stream this AudioTrack is configured for.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
     */
    public int getStreamType() {
        return mStreamType;
    }

    /**
     * Returns the configured channel position mask.
     * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
     * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
     * This method may return {@link AudioFormat#CHANNEL_INVALID} if
     * a channel index mask was used. Consider
     * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
     * which contains both the channel position mask and the channel index mask.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }

    /**
     * Returns the configured <code>AudioTrack</code> format.
     * @return an {@link AudioFormat} containing the
     * <code>AudioTrack</code> parameters at the time of configuration.
985 */ 986 public @NonNull AudioFormat getFormat() { 987 AudioFormat.Builder builder = new AudioFormat.Builder() 988 .setSampleRate(mSampleRate) 989 .setEncoding(mAudioFormat); 990 if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) { 991 builder.setChannelMask(mChannelConfiguration); 992 } 993 if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) { 994 builder.setChannelIndexMask(mChannelIndexMask); 995 } 996 return builder.build(); 997 } 998 999 /** 1000 * Returns the configured number of channels. 1001 */ 1002 public int getChannelCount() { 1003 return mChannelCount; 1004 } 1005 1006 /** 1007 * Returns the state of the AudioTrack instance. This is useful after the 1008 * AudioTrack instance has been created to check if it was initialized 1009 * properly. This ensures that the appropriate resources have been acquired. 1010 * @see #STATE_UNINITIALIZED 1011 * @see #STATE_INITIALIZED 1012 * @see #STATE_NO_STATIC_DATA 1013 */ 1014 public int getState() { 1015 return mState; 1016 } 1017 1018 /** 1019 * Returns the playback state of the AudioTrack instance. 1020 * @see #PLAYSTATE_STOPPED 1021 * @see #PLAYSTATE_PAUSED 1022 * @see #PLAYSTATE_PLAYING 1023 */ 1024 public int getPlayState() { 1025 synchronized (mPlayStateLock) { 1026 return mPlayState; 1027 } 1028 } 1029 1030 /** 1031 * Returns the frame count of the native <code>AudioTrack</code> buffer. 1032 * <p> If the track's creation mode is {@link #MODE_STATIC}, 1033 * it is equal to the specified bufferSizeInBytes on construction, converted to frame units. 1034 * A static track's native frame count will not change. 1035 * <p> If the track's creation mode is {@link #MODE_STREAM}, 1036 * it is greater than or equal to the specified bufferSizeInBytes converted to frame units. 
     * For streaming tracks, this value may be rounded up to a larger value if needed by
     * the target output sink, and
     * if the track is subsequently routed to a different output sink, the native
     * frame count may enlarge to accommodate.
     * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
     * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
     * the size of the native <code>AudioTrack</code> buffer in bytes.
     * <p> See also {@link AudioManager#getProperty(String)} for key
     * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     * @return current size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException
     */
    public int getBufferSizeInFrames() {
        return native_get_native_frame_count();
    }

    /**
     * Returns the frame count of the native <code>AudioTrack</code> buffer.
     * @return current size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException
     * @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
     */
    @Deprecated
    protected int getNativeFrameCount() {
        // kept only for legacy subclasses; identical to getBufferSizeInFrames()
        return native_get_native_frame_count();
    }

    /**
     * Returns marker position expressed in frames.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }

    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }

    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits. That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter. It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
     * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
     * the total number of frames played since reset,
     * <i>not</i> the current offset within the buffer.
     */
    public int getPlaybackHeadPosition() {
        return native_get_position();
    }

    /**
     * Returns this track's estimated latency in milliseconds. This includes the latency due
     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
     *
     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
     * a better solution.
     * @hide
     */
    public int getLatency() {
        return native_get_latency();
    }

    /**
     * Returns the output sample rate in Hz for the specified stream type.
     */
    static public int getNativeOutputSampleRate(int streamType) {
        return native_get_output_sample_rate(streamType);
    }

    /**
     * Returns the minimum buffer size required for the successful creation of an AudioTrack
     * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
     * guarantee a smooth playback under load, and higher values should be chosen according to
     * the expected frequency at which the buffer will be refilled with additional data to play.
1120 * For example, if you intend to dynamically set the source sample rate of an AudioTrack 1121 * to a higher value than the initial source sample rate, be sure to configure the buffer size 1122 * based on the highest planned sample rate. 1123 * @param sampleRateInHz the source sample rate expressed in Hz. 1124 * @param channelConfig describes the configuration of the audio channels. 1125 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 1126 * {@link AudioFormat#CHANNEL_OUT_STEREO} 1127 * @param audioFormat the format in which the audio data is represented. 1128 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 1129 * {@link AudioFormat#ENCODING_PCM_8BIT}, 1130 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 1131 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed, 1132 * or {@link #ERROR} if unable to query for output properties, 1133 * or the minimum buffer size expressed in bytes. 1134 */ 1135 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) { 1136 int channelCount = 0; 1137 switch(channelConfig) { 1138 case AudioFormat.CHANNEL_OUT_MONO: 1139 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1140 channelCount = 1; 1141 break; 1142 case AudioFormat.CHANNEL_OUT_STEREO: 1143 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1144 channelCount = 2; 1145 break; 1146 default: 1147 if (!isMultichannelConfigSupported(channelConfig)) { 1148 loge("getMinBufferSize(): Invalid channel configuration."); 1149 return ERROR_BAD_VALUE; 1150 } else { 1151 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1152 } 1153 } 1154 1155 if (!AudioFormat.isPublicEncoding(audioFormat)) { 1156 loge("getMinBufferSize(): Invalid audio format."); 1157 return ERROR_BAD_VALUE; 1158 } 1159 1160 // sample rate, note these values are subject to change 1161 if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) { 1162 loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample 
rate."); 1163 return ERROR_BAD_VALUE; 1164 } 1165 1166 int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat); 1167 if (size <= 0) { 1168 loge("getMinBufferSize(): error querying hardware"); 1169 return ERROR; 1170 } 1171 else { 1172 return size; 1173 } 1174 } 1175 1176 /** 1177 * Returns the audio session ID. 1178 * 1179 * @return the ID of the audio session this AudioTrack belongs to. 1180 */ 1181 public int getAudioSessionId() { 1182 return mSessionId; 1183 } 1184 1185 /** 1186 * Poll for a timestamp on demand. 1187 * <p> 1188 * If you need to track timestamps during initial warmup or after a routing or mode change, 1189 * you should request a new timestamp periodically until the reported timestamps 1190 * show that the frame position is advancing, or until it becomes clear that 1191 * timestamps are unavailable for this route. 1192 * <p> 1193 * After the clock is advancing at a stable rate, 1194 * query for a new timestamp approximately once every 10 seconds to once per minute. 1195 * Calling this method more often is inefficient. 1196 * It is also counter-productive to call this method more often than recommended, 1197 * because the short-term differences between successive timestamp reports are not meaningful. 1198 * If you need a high-resolution mapping between frame position and presentation time, 1199 * consider implementing that at application level, based on low-resolution timestamps. 1200 * <p> 1201 * The audio data at the returned position may either already have been 1202 * presented, or may have not yet been presented but is committed to be presented. 1203 * It is not possible to request the time corresponding to a particular position, 1204 * or to request the (fractional) position corresponding to a particular time. 1205 * If you need such features, consider implementing them at application level. 1206 * 1207 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1208 * and owned by caller. 
1209 * @return true if a timestamp is available, or false if no timestamp is available. 1210 * If a timestamp if available, 1211 * the AudioTimestamp instance is filled in with a position in frame units, together 1212 * with the estimated time when that frame was presented or is committed to 1213 * be presented. 1214 * In the case that no timestamp is available, any supplied instance is left unaltered. 1215 * A timestamp may be temporarily unavailable while the audio clock is stabilizing, 1216 * or during and immediately after a route change. 1217 * A timestamp is permanently unavailable for a given route if the route does not support 1218 * timestamps. In this case, the approximate frame position can be obtained 1219 * using {@link #getPlaybackHeadPosition}. 1220 * However, it may be useful to continue to query for 1221 * timestamps occasionally, to recover after a route change. 1222 */ 1223 // Add this text when the "on new timestamp" API is added: 1224 // Use if you need to get the most recent timestamp outside of the event callback handler. 1225 public boolean getTimestamp(AudioTimestamp timestamp) 1226 { 1227 if (timestamp == null) { 1228 throw new IllegalArgumentException(); 1229 } 1230 // It's unfortunate, but we have to either create garbage every time or use synchronized 1231 long[] longArray = new long[2]; 1232 int ret = native_get_timestamp(longArray); 1233 if (ret != SUCCESS) { 1234 return false; 1235 } 1236 timestamp.framePosition = longArray[0]; 1237 timestamp.nanoTime = longArray[1]; 1238 return true; 1239 } 1240 1241 /** 1242 * Poll for a timestamp on demand. 1243 * <p> 1244 * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code. 1245 * 1246 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1247 * and owned by caller. 
     * @return {@link #SUCCESS} if a timestamp is available
     *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
     *         immediately after start/ACTIVE, when the number of frames consumed is less than the
     *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
     *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
     *         for the timestamp.
     *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *         needs to be recreated.
     *         {@link #ERROR_INVALID_OPERATION} if current route does not support
     *         timestamps. In this case, the approximate frame position can be obtained
     *         using {@link #getPlaybackHeadPosition}.
     *
     *         The AudioTimestamp instance is filled in with a position in frame units, together
     *         with the estimated time when that frame was presented or is committed to
     *         be presented.
     *   @hide
     */
    // Add this text when the "on new timestamp" API is added:
    //   Use if you need to get the most recent timestamp outside of the event callback handler.
    public int getTimestampWithStatus(AudioTimestamp timestamp)
    {
        if (timestamp == null) {
            throw new IllegalArgumentException();
        }
        // It's unfortunate, but we have to either create garbage every time or use synchronized
        long[] longArray = new long[2];
        int ret = native_get_timestamp(longArray);
        // NOTE: unlike getTimestamp(AudioTimestamp), the caller's instance is written
        // back even when the native call did not return SUCCESS.
        timestamp.framePosition = longArray[0];
        timestamp.nanoTime = longArray[1];
        return ret;
    }

    //--------------------------------------------------------------------------
    // Initialization / configuration
    //--------------------
    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
     * Notifications will be received in the same thread as the one in which the AudioTrack
     * instance was created.
1288 * @param listener 1289 */ 1290 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) { 1291 setPlaybackPositionUpdateListener(listener, null); 1292 } 1293 1294 /** 1295 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1296 * for each periodic playback head position update. 1297 * Use this method to receive AudioTrack events in the Handler associated with another 1298 * thread than the one in which you created the AudioTrack instance. 1299 * @param listener 1300 * @param handler the Handler that will receive the event notification messages. 1301 */ 1302 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, 1303 Handler handler) { 1304 if (listener != null) { 1305 mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler); 1306 } else { 1307 mEventHandlerDelegate = null; 1308 } 1309 } 1310 1311 1312 private static float clampGainOrLevel(float gainOrLevel) { 1313 if (Float.isNaN(gainOrLevel)) { 1314 throw new IllegalArgumentException(); 1315 } 1316 if (gainOrLevel < GAIN_MIN) { 1317 gainOrLevel = GAIN_MIN; 1318 } else if (gainOrLevel > GAIN_MAX) { 1319 gainOrLevel = GAIN_MAX; 1320 } 1321 return gainOrLevel; 1322 } 1323 1324 1325 /** 1326 * Sets the specified left and right output gain values on the AudioTrack. 1327 * <p>Gain values are clamped to the closed interval [0.0, max] where 1328 * max is the value of {@link #getMaxVolume}. 1329 * A value of 0.0 results in zero gain (silence), and 1330 * a value of 1.0 means unity gain (signal unchanged). 1331 * The default value is 1.0 meaning unity gain. 1332 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1333 * @param leftGain output gain for the left channel. 
1334 * @param rightGain output gain for the right channel 1335 * @return error code or success, see {@link #SUCCESS}, 1336 * {@link #ERROR_INVALID_OPERATION} 1337 * @deprecated Applications should use {@link #setVolume} instead, as it 1338 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 1339 */ 1340 public int setStereoVolume(float leftGain, float rightGain) { 1341 if (isRestricted()) { 1342 return SUCCESS; 1343 } 1344 if (mState == STATE_UNINITIALIZED) { 1345 return ERROR_INVALID_OPERATION; 1346 } 1347 1348 leftGain = clampGainOrLevel(leftGain); 1349 rightGain = clampGainOrLevel(rightGain); 1350 1351 native_setVolume(leftGain, rightGain); 1352 1353 return SUCCESS; 1354 } 1355 1356 1357 /** 1358 * Sets the specified output gain value on all channels of this track. 1359 * <p>Gain values are clamped to the closed interval [0.0, max] where 1360 * max is the value of {@link #getMaxVolume}. 1361 * A value of 0.0 results in zero gain (silence), and 1362 * a value of 1.0 means unity gain (signal unchanged). 1363 * The default value is 1.0 meaning unity gain. 1364 * <p>This API is preferred over {@link #setStereoVolume}, as it 1365 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 1366 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1367 * @param gain output gain for all channels. 1368 * @return error code or success, see {@link #SUCCESS}, 1369 * {@link #ERROR_INVALID_OPERATION} 1370 */ 1371 public int setVolume(float gain) { 1372 return setStereoVolume(gain, gain); 1373 } 1374 1375 1376 /** 1377 * Sets the playback sample rate for this track. This sets the sampling rate at which 1378 * the audio data will be consumed and played back 1379 * (as set by the sampleRateInHz parameter in the 1380 * {@link #AudioTrack(int, int, int, int, int, int)} constructor), 1381 * not the original sampling rate of the 1382 * content. 
     * For example, setting it to half the sample rate of the content will cause the
     * playback to last twice as long, but will also result in a pitch shift down by one octave.
     * The valid sample rate range is from 1 Hz to twice the value returned by
     * {@link #getNativeOutputSampleRate(int)}.
     * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
     * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
     * for playback of content of differing sample rate,
     * but with identical encoding and channel mask.
     * @param sampleRateInHz the sample rate expressed in Hz
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setPlaybackRate(int sampleRateInHz) {
        // state is checked before the value: an uninitialized track reports
        // ERROR_INVALID_OPERATION even for a bad rate
        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        if (sampleRateInHz <= 0) {
            return ERROR_BAD_VALUE;
        }
        // further range validation (e.g. upper bound) is performed by the native layer
        return native_set_playback_rate(sampleRateInHz);
    }


    /**
     * Sets the playback parameters.
     * This method returns failure if it cannot apply the playback parameters.
     * One possible cause is that the parameters for speed or pitch are out of range.
     * Another possible cause is that the <code>AudioTrack</code> is streaming
     * (see {@link #MODE_STREAM}) and the
     * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
     * on configuration must be larger than the speed multiplied by the minimum size
     * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
     * @param params see {@link PlaybackParams}. In particular,
     * speed, pitch, and audio mode should be set.
     * @throws IllegalArgumentException if the parameters are invalid or not accepted.
     * @throws IllegalStateException if track is not initialized.
     */
    public void setPlaybackParams(@NonNull PlaybackParams params) {
        if (params == null) {
            throw new IllegalArgumentException("params is null");
        }
        // validation of the individual parameter values is performed by the native layer
        native_set_playback_params(params);
    }


    /**
     * Sets the position of the notification marker.  At most one marker can be active.
     * @param markerInFrames marker position in wrapping frame units similar to
     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
     *        To set a marker at a position which would appear as zero due to wraparound,
     *        a workaround is to use a non-zero position near zero, such as -1 or 1.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *  {@link #ERROR_INVALID_OPERATION}
     */
    public int setNotificationMarkerPosition(int markerInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_set_marker_pos(markerInFrames);
    }


    /**
     * Sets the period for the periodic notification event.
     * @param periodInFrames update period expressed in frames.
     * Zero period means no position updates.  A negative period is not allowed.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
     */
    public int setPositionNotificationPeriod(int periodInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        // negative periods are rejected by the native layer
        return native_set_pos_update_period(periodInFrames);
    }


    /**
     * Sets the playback head position within the static buffer.
     * The track must be stopped or paused for the position to be changed,
     * and must use the {@link #MODE_STATIC} mode.
     * @param positionInFrames playback head position within buffer, expressed in frames.
     * Zero corresponds to start of buffer.
     * The position must not be greater than the buffer size in frames, or negative.
1465 * Though this method and {@link #getPlaybackHeadPosition()} have similar names, 1466 * the position values have different meanings. 1467 * <br> 1468 * If looping is currently enabled and the new position is greater than or equal to the 1469 * loop end marker, the behavior varies by API level: 1470 * as of {@link android.os.Build.VERSION_CODES#M}, 1471 * the looping is first disabled and then the position is set. 1472 * For earlier API levels, the behavior is unspecified. 1473 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1474 * {@link #ERROR_INVALID_OPERATION} 1475 */ 1476 public int setPlaybackHeadPosition(int positionInFrames) { 1477 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 1478 getPlayState() == PLAYSTATE_PLAYING) { 1479 return ERROR_INVALID_OPERATION; 1480 } 1481 if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) { 1482 return ERROR_BAD_VALUE; 1483 } 1484 return native_set_position(positionInFrames); 1485 } 1486 1487 /** 1488 * Sets the loop points and the loop count. The loop can be infinite. 1489 * Similarly to setPlaybackHeadPosition, 1490 * the track must be stopped or paused for the loop points to be changed, 1491 * and must use the {@link #MODE_STATIC} mode. 1492 * @param startInFrames loop start marker expressed in frames. 1493 * Zero corresponds to start of buffer. 1494 * The start marker must not be greater than or equal to the buffer size in frames, or negative. 1495 * @param endInFrames loop end marker expressed in frames. 1496 * The total buffer size in frames corresponds to end of buffer. 1497 * The end marker must not be greater than the buffer size in frames. 1498 * For looping, the end marker must not be less than or equal to the start marker, 1499 * but to disable looping 1500 * it is permitted for start marker, end marker, and loop count to all be 0. 1501 * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}. 
1502 * If the loop period (endInFrames - startInFrames) is too small for the implementation to 1503 * support, 1504 * {@link #ERROR_BAD_VALUE} is returned. 1505 * The loop range is the interval [startInFrames, endInFrames). 1506 * <br> 1507 * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged, 1508 * unless it is greater than or equal to the loop end marker, in which case 1509 * it is forced to the loop start marker. 1510 * For earlier API levels, the effect on position is unspecified. 1511 * @param loopCount the number of times the loop is looped; must be greater than or equal to -1. 1512 * A value of -1 means infinite looping, and 0 disables looping. 1513 * A value of positive N means to "loop" (go back) N times. For example, 1514 * a value of one means to play the region two times in total. 1515 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1516 * {@link #ERROR_INVALID_OPERATION} 1517 */ 1518 public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) { 1519 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 1520 getPlayState() == PLAYSTATE_PLAYING) { 1521 return ERROR_INVALID_OPERATION; 1522 } 1523 if (loopCount == 0) { 1524 ; // explicitly allowed as an exception to the loop region range check 1525 } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames && 1526 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) { 1527 return ERROR_BAD_VALUE; 1528 } 1529 return native_set_loop(startInFrames, endInFrames, loopCount); 1530 } 1531 1532 /** 1533 * Sets the initialization state of the instance. This method was originally intended to be used 1534 * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state. 1535 * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete. 
1536 * @param state the state of the AudioTrack instance 1537 * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack. 1538 */ 1539 @Deprecated 1540 protected void setState(int state) { 1541 mState = state; 1542 } 1543 1544 1545 //--------------------------------------------------------- 1546 // Transport control methods 1547 //-------------------- 1548 /** 1549 * Starts playing an AudioTrack. 1550 * <p> 1551 * If track's creation mode is {@link #MODE_STATIC}, you must have called one of 1552 * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)}, 1553 * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)}, 1554 * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to 1555 * play(). 1556 * <p> 1557 * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to 1558 * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor). 1559 * If you don't call write() first, or if you call write() but with an insufficient amount of 1560 * data, then the track will be in underrun state at play(). In this case, 1561 * playback will not actually start playing until the data path is filled to a 1562 * device-specific minimum level. This requirement for the path to be filled 1563 * to a minimum level is also true when resuming audio playback after calling stop(). 1564 * Similarly the buffer will need to be filled up again after 1565 * the track underruns due to failure to call write() in a timely manner with sufficient data. 1566 * For portability, an application should prime the data path to the maximum allowed 1567 * by writing data until the write() method returns a short transfer count. 1568 * This allows play() to start immediately, and reduces the chance of underrun. 
1569 * 1570 * @throws IllegalStateException if the track isn't properly initialized 1571 */ 1572 public void play() 1573 throws IllegalStateException { 1574 if (mState != STATE_INITIALIZED) { 1575 throw new IllegalStateException("play() called on uninitialized AudioTrack."); 1576 } 1577 if (isRestricted()) { 1578 setVolume(0); 1579 } 1580 synchronized(mPlayStateLock) { 1581 native_start(); 1582 mPlayState = PLAYSTATE_PLAYING; 1583 } 1584 } 1585 1586 private boolean isRestricted() { 1587 if ((mAttributes.getAllFlags() & AudioAttributes.FLAG_BYPASS_INTERRUPTION_POLICY) != 0) { 1588 return false; 1589 } 1590 try { 1591 final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType); 1592 final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage, 1593 Process.myUid(), ActivityThread.currentPackageName()); 1594 return mode != AppOpsManager.MODE_ALLOWED; 1595 } catch (RemoteException e) { 1596 return false; 1597 } 1598 } 1599 1600 /** 1601 * Stops playing the audio data. 1602 * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing 1603 * after the last buffer that was written has been played. For an immediate stop, use 1604 * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played 1605 * back yet. 1606 * @throws IllegalStateException 1607 */ 1608 public void stop() 1609 throws IllegalStateException { 1610 if (mState != STATE_INITIALIZED) { 1611 throw new IllegalStateException("stop() called on uninitialized AudioTrack."); 1612 } 1613 1614 // stop playing 1615 synchronized(mPlayStateLock) { 1616 native_stop(); 1617 mPlayState = PLAYSTATE_STOPPED; 1618 mAvSyncHeader = null; 1619 mAvSyncBytesRemaining = 0; 1620 } 1621 } 1622 1623 /** 1624 * Pauses the playback of the audio data. Data that has not been played 1625 * back will not be discarded. Subsequent calls to {@link #play} will play 1626 * this data back. See {@link #flush()} to discard this data. 
1627 * 1628 * @throws IllegalStateException 1629 */ 1630 public void pause() 1631 throws IllegalStateException { 1632 if (mState != STATE_INITIALIZED) { 1633 throw new IllegalStateException("pause() called on uninitialized AudioTrack."); 1634 } 1635 //logd("pause()"); 1636 1637 // pause playback 1638 synchronized(mPlayStateLock) { 1639 native_pause(); 1640 mPlayState = PLAYSTATE_PAUSED; 1641 } 1642 } 1643 1644 1645 //--------------------------------------------------------- 1646 // Audio data supply 1647 //-------------------- 1648 1649 /** 1650 * Flushes the audio data currently queued for playback. Any data that has 1651 * been written but not yet presented will be discarded. No-op if not stopped or paused, 1652 * or if the track's creation mode is not {@link #MODE_STREAM}. 1653 * <BR> Note that although data written but not yet presented is discarded, there is no 1654 * guarantee that all of the buffer space formerly used by that data 1655 * is available for a subsequent write. 1656 * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code> 1657 * less than or equal to the total buffer size 1658 * may return a short actual transfer count. 1659 */ 1660 public void flush() { 1661 if (mState == STATE_INITIALIZED) { 1662 // flush the data in native layer 1663 native_flush(); 1664 mAvSyncHeader = null; 1665 mAvSyncBytesRemaining = 0; 1666 } 1667 1668 } 1669 1670 /** 1671 * Writes the audio data to the audio sink for playback (streaming mode), 1672 * or copies audio data for later playback (static buffer mode). 1673 * The format specified in the AudioTrack constructor should be 1674 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 1675 * <p> 1676 * In streaming mode, the write will normally block until all the data has been enqueued for 1677 * playback, and will return a full transfer count. 
However, if the track is stopped or paused 1678 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1679 * occurs during the write, then the write may return a short transfer count. 1680 * <p> 1681 * In static buffer mode, copies the data to the buffer starting at offset 0. 1682 * Note that the actual playback of this data might occur after this function returns. 1683 * 1684 * @param audioData the array that holds the data to play. 1685 * @param offsetInBytes the offset expressed in bytes in audioData where the data to play 1686 * starts. 1687 * @param sizeInBytes the number of bytes to read in audioData after the offset. 1688 * @return zero or the positive number of bytes that were written, or 1689 * {@link #ERROR_INVALID_OPERATION} 1690 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1691 * the parameters don't resolve to valid data and indexes, or 1692 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1693 * needs to be recreated. 1694 * The dead object error code is not returned if some data was successfully transferred. 1695 * In this case, the error is returned at the next write(). 1696 * 1697 * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code> 1698 * set to {@link #WRITE_BLOCKING}. 1699 */ 1700 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) { 1701 return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING); 1702 } 1703 1704 /** 1705 * Writes the audio data to the audio sink for playback (streaming mode), 1706 * or copies audio data for later playback (static buffer mode). 1707 * The format specified in the AudioTrack constructor should be 1708 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 1709 * <p> 1710 * In streaming mode, the blocking behavior depends on the write mode. 
If the write mode is 1711 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1712 * for playback, and will return a full transfer count. However, if the write mode is 1713 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1714 * interrupts the write by calling stop or pause, or an I/O error 1715 * occurs during the write, then the write may return a short transfer count. 1716 * <p> 1717 * In static buffer mode, copies the data to the buffer starting at offset 0, 1718 * and the write mode is ignored. 1719 * Note that the actual playback of this data might occur after this function returns. 1720 * 1721 * @param audioData the array that holds the data to play. 1722 * @param offsetInBytes the offset expressed in bytes in audioData where the data to play 1723 * starts. 1724 * @param sizeInBytes the number of bytes to read in audioData after the offset. 1725 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1726 * effect in static mode. 1727 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1728 * to the audio sink. 1729 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1730 * queuing as much audio data for playback as possible without blocking. 1731 * @return zero or the positive number of bytes that were written, or 1732 * {@link #ERROR_INVALID_OPERATION} 1733 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1734 * the parameters don't resolve to valid data and indexes, or 1735 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1736 * needs to be recreated. 1737 * The dead object error code is not returned if some data was successfully transferred. 1738 * In this case, the error is returned at the next write(). 
1739 */ 1740 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes, 1741 @WriteMode int writeMode) { 1742 1743 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1744 return ERROR_INVALID_OPERATION; 1745 } 1746 1747 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1748 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1749 return ERROR_BAD_VALUE; 1750 } 1751 1752 if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0) 1753 || (offsetInBytes + sizeInBytes < 0) // detect integer overflow 1754 || (offsetInBytes + sizeInBytes > audioData.length)) { 1755 return ERROR_BAD_VALUE; 1756 } 1757 1758 int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat, 1759 writeMode == WRITE_BLOCKING); 1760 1761 if ((mDataLoadMode == MODE_STATIC) 1762 && (mState == STATE_NO_STATIC_DATA) 1763 && (ret > 0)) { 1764 // benign race with respect to other APIs that read mState 1765 mState = STATE_INITIALIZED; 1766 } 1767 1768 return ret; 1769 } 1770 1771 /** 1772 * Writes the audio data to the audio sink for playback (streaming mode), 1773 * or copies audio data for later playback (static buffer mode). 1774 * The format specified in the AudioTrack constructor should be 1775 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1776 * <p> 1777 * In streaming mode, the write will normally block until all the data has been enqueued for 1778 * playback, and will return a full transfer count. However, if the track is stopped or paused 1779 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1780 * occurs during the write, then the write may return a short transfer count. 1781 * <p> 1782 * In static buffer mode, copies the data to the buffer starting at offset 0. 1783 * Note that the actual playback of this data might occur after this function returns. 
1784 * 1785 * @param audioData the array that holds the data to play. 1786 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 1787 * starts. 1788 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1789 * @return zero or the positive number of shorts that were written, or 1790 * {@link #ERROR_INVALID_OPERATION} 1791 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1792 * the parameters don't resolve to valid data and indexes, or 1793 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1794 * needs to be recreated. 1795 * The dead object error code is not returned if some data was successfully transferred. 1796 * In this case, the error is returned at the next write(). 1797 * 1798 * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code> 1799 * set to {@link #WRITE_BLOCKING}. 1800 */ 1801 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) { 1802 return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING); 1803 } 1804 1805 /** 1806 * Writes the audio data to the audio sink for playback (streaming mode), 1807 * or copies audio data for later playback (static buffer mode). 1808 * The format specified in the AudioTrack constructor should be 1809 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1810 * <p> 1811 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 1812 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1813 * for playback, and will return a full transfer count. However, if the write mode is 1814 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1815 * interrupts the write by calling stop or pause, or an I/O error 1816 * occurs during the write, then the write may return a short transfer count. 
1817 * <p> 1818 * In static buffer mode, copies the data to the buffer starting at offset 0. 1819 * Note that the actual playback of this data might occur after this function returns. 1820 * 1821 * @param audioData the array that holds the data to play. 1822 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 1823 * starts. 1824 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1825 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1826 * effect in static mode. 1827 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1828 * to the audio sink. 1829 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1830 * queuing as much audio data for playback as possible without blocking. 1831 * @return zero or the positive number of shorts that were written, or 1832 * {@link #ERROR_INVALID_OPERATION} 1833 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1834 * the parameters don't resolve to valid data and indexes, or 1835 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1836 * needs to be recreated. 1837 * The dead object error code is not returned if some data was successfully transferred. 1838 * In this case, the error is returned at the next write(). 
1839 */ 1840 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts, 1841 @WriteMode int writeMode) { 1842 1843 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1844 return ERROR_INVALID_OPERATION; 1845 } 1846 1847 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1848 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1849 return ERROR_BAD_VALUE; 1850 } 1851 1852 if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0) 1853 || (offsetInShorts + sizeInShorts < 0) // detect integer overflow 1854 || (offsetInShorts + sizeInShorts > audioData.length)) { 1855 return ERROR_BAD_VALUE; 1856 } 1857 1858 int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat, 1859 writeMode == WRITE_BLOCKING); 1860 1861 if ((mDataLoadMode == MODE_STATIC) 1862 && (mState == STATE_NO_STATIC_DATA) 1863 && (ret > 0)) { 1864 // benign race with respect to other APIs that read mState 1865 mState = STATE_INITIALIZED; 1866 } 1867 1868 return ret; 1869 } 1870 1871 /** 1872 * Writes the audio data to the audio sink for playback (streaming mode), 1873 * or copies audio data for later playback (static buffer mode). 1874 * The format specified in the AudioTrack constructor should be 1875 * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array. 1876 * <p> 1877 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 1878 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1879 * for playback, and will return a full transfer count. However, if the write mode is 1880 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1881 * interrupts the write by calling stop or pause, or an I/O error 1882 * occurs during the write, then the write may return a short transfer count. 
1883 * <p> 1884 * In static buffer mode, copies the data to the buffer starting at offset 0, 1885 * and the write mode is ignored. 1886 * Note that the actual playback of this data might occur after this function returns. 1887 * 1888 * @param audioData the array that holds the data to play. 1889 * The implementation does not clip for sample values within the nominal range 1890 * [-1.0f, 1.0f], provided that all gains in the audio pipeline are 1891 * less than or equal to unity (1.0f), and in the absence of post-processing effects 1892 * that could add energy, such as reverb. For the convenience of applications 1893 * that compute samples using filters with non-unity gain, 1894 * sample values +3 dB beyond the nominal range are permitted. 1895 * However such values may eventually be limited or clipped, depending on various gains 1896 * and later processing in the audio path. Therefore applications are encouraged 1897 * to provide samples values within the nominal range. 1898 * @param offsetInFloats the offset, expressed as a number of floats, 1899 * in audioData where the data to play starts. 1900 * @param sizeInFloats the number of floats to read in audioData after the offset. 1901 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1902 * effect in static mode. 1903 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1904 * to the audio sink. 1905 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1906 * queuing as much audio data for playback as possible without blocking. 1907 * @return zero or the positive number of floats that were written, or 1908 * {@link #ERROR_INVALID_OPERATION} 1909 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1910 * the parameters don't resolve to valid data and indexes, or 1911 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1912 * needs to be recreated. 
1913 * The dead object error code is not returned if some data was successfully transferred. 1914 * In this case, the error is returned at the next write(). 1915 */ 1916 public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats, 1917 @WriteMode int writeMode) { 1918 1919 if (mState == STATE_UNINITIALIZED) { 1920 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 1921 return ERROR_INVALID_OPERATION; 1922 } 1923 1924 if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) { 1925 Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT"); 1926 return ERROR_INVALID_OPERATION; 1927 } 1928 1929 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1930 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1931 return ERROR_BAD_VALUE; 1932 } 1933 1934 if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0) 1935 || (offsetInFloats + sizeInFloats < 0) // detect integer overflow 1936 || (offsetInFloats + sizeInFloats > audioData.length)) { 1937 Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size"); 1938 return ERROR_BAD_VALUE; 1939 } 1940 1941 int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat, 1942 writeMode == WRITE_BLOCKING); 1943 1944 if ((mDataLoadMode == MODE_STATIC) 1945 && (mState == STATE_NO_STATIC_DATA) 1946 && (ret > 0)) { 1947 // benign race with respect to other APIs that read mState 1948 mState = STATE_INITIALIZED; 1949 } 1950 1951 return ret; 1952 } 1953 1954 1955 /** 1956 * Writes the audio data to the audio sink for playback (streaming mode), 1957 * or copies audio data for later playback (static buffer mode). 1958 * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor. 1959 * <p> 1960 * In streaming mode, the blocking behavior depends on the write mode. 
If the write mode is 1961 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1962 * for playback, and will return a full transfer count. However, if the write mode is 1963 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1964 * interrupts the write by calling stop or pause, or an I/O error 1965 * occurs during the write, then the write may return a short transfer count. 1966 * <p> 1967 * In static buffer mode, copies the data to the buffer starting at offset 0, 1968 * and the write mode is ignored. 1969 * Note that the actual playback of this data might occur after this function returns. 1970 * 1971 * @param audioData the buffer that holds the data to play, starting at the position reported 1972 * by <code>audioData.position()</code>. 1973 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 1974 * have been advanced to reflect the amount of data that was successfully written to 1975 * the AudioTrack. 1976 * @param sizeInBytes number of bytes to write. 1977 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 1978 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1979 * effect in static mode. 1980 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1981 * to the audio sink. 1982 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1983 * queuing as much audio data for playback as possible without blocking. 1984 * @return zero or the positive number of bytes that were written, or 1985 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or 1986 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1987 * needs to be recreated. 1988 * The dead object error code is not returned if some data was successfully transferred. 
1989 * In this case, the error is returned at the next write(). 1990 */ 1991 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 1992 @WriteMode int writeMode) { 1993 1994 if (mState == STATE_UNINITIALIZED) { 1995 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 1996 return ERROR_INVALID_OPERATION; 1997 } 1998 1999 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2000 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2001 return ERROR_BAD_VALUE; 2002 } 2003 2004 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 2005 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 2006 return ERROR_BAD_VALUE; 2007 } 2008 2009 int ret = 0; 2010 if (audioData.isDirect()) { 2011 ret = native_write_native_bytes(audioData, 2012 audioData.position(), sizeInBytes, mAudioFormat, 2013 writeMode == WRITE_BLOCKING); 2014 } else { 2015 ret = native_write_byte(NioUtils.unsafeArray(audioData), 2016 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 2017 sizeInBytes, mAudioFormat, 2018 writeMode == WRITE_BLOCKING); 2019 } 2020 2021 if ((mDataLoadMode == MODE_STATIC) 2022 && (mState == STATE_NO_STATIC_DATA) 2023 && (ret > 0)) { 2024 // benign race with respect to other APIs that read mState 2025 mState = STATE_INITIALIZED; 2026 } 2027 2028 if (ret > 0) { 2029 audioData.position(audioData.position() + ret); 2030 } 2031 2032 return ret; 2033 } 2034 2035 /** 2036 * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track. 2037 * The blocking behavior will depend on the write mode. 2038 * @param audioData the buffer that holds the data to play, starting at the position reported 2039 * by <code>audioData.position()</code>. 
2040 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 2041 * have been advanced to reflect the amount of data that was successfully written to 2042 * the AudioTrack. 2043 * @param sizeInBytes number of bytes to write. 2044 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 2045 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. 2046 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2047 * to the audio sink. 2048 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2049 * queuing as much audio data for playback as possible without blocking. 2050 * @param timestamp The timestamp of the first decodable audio frame in the provided audioData. 2051 * @return zero or a positive number of bytes that were written, or 2052 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or 2053 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2054 * needs to be recreated. 2055 * The dead object error code is not returned if some data was successfully transferred. 2056 * In this case, the error is returned at the next write(). 
     *     In this case, the error is returned at the next write().
     */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes only make sense for streaming tracks.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Tracks without HW AV sync silently fall back to the plain write path,
        // discarding the timestamp.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists
        // Header layout (big-endian): 32-bit magic 0x55550001, 32-bit payload size,
        // 64-bit timestamp. mAvSyncBytesRemaining tracks how much of the announced
        // payload has yet to be written; the header persists across calls until then.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(16);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550001);
            mAvSyncHeader.putInt(sizeInBytes);
            mAvSyncHeader.putLong(timestamp);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // On error, drop the pending header so the next call starts fresh.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Header only partially written (e.g. non-blocking mode): report zero
                // payload consumed so the caller retries with the same data.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more than the payload size announced in the current header.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;
        if (mAvSyncBytesRemaining == 0) {
            // Announced payload complete; the next write() emits a fresh header.
            mAvSyncHeader = null;
        }

        return ret;
    }


    /**
     * Sets the playback head position within the static buffer to zero,
     * that is it rewinds to start of static buffer.
     * The track must be stopped or paused, and
     * the track's creation mode must be {@link #MODE_STATIC}.
     * <p>
     * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
     * {@link #getPlaybackHeadPosition()} to zero.
     * For earlier API levels, the reset behavior is unspecified.
     * <p>
     * Use {@link #setPlaybackHeadPosition(int)} with a zero position
     * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *   {@link #ERROR_INVALID_OPERATION}
     */
    public int reloadStaticData() {
        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_reload_static();
    }

    //--------------------------------------------------------------------------
    // Audio effects management
    //--------------------

    /**
     * Attaches an auxiliary effect to the audio track. A typical auxiliary
     * effect is a reverberation effect which can be applied on any sound source
     * that directs a certain amount of its energy to this effect. This amount
     * is defined by setAuxEffectSendLevel().
     * {@see #setAuxEffectSendLevel(float)}.
2164 * <p>After creating an auxiliary effect (e.g. 2165 * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with 2166 * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling 2167 * this method to attach the audio track to the effect. 2168 * <p>To detach the effect from the audio track, call this method with a 2169 * null effect id. 2170 * 2171 * @param effectId system wide unique id of the effect to attach 2172 * @return error code or success, see {@link #SUCCESS}, 2173 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE} 2174 */ 2175 public int attachAuxEffect(int effectId) { 2176 if (mState == STATE_UNINITIALIZED) { 2177 return ERROR_INVALID_OPERATION; 2178 } 2179 return native_attachAuxEffect(effectId); 2180 } 2181 2182 /** 2183 * Sets the send level of the audio track to the attached auxiliary effect 2184 * {@link #attachAuxEffect(int)}. Effect levels 2185 * are clamped to the closed interval [0.0, max] where 2186 * max is the value of {@link #getMaxVolume}. 2187 * A value of 0.0 results in no effect, and a value of 1.0 is full send. 2188 * <p>By default the send level is 0.0f, so even if an effect is attached to the player 2189 * this method must be called for the effect to be applied. 2190 * <p>Note that the passed level value is a linear scalar. 
UI controls should be scaled 2191 * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB, 2192 * so an appropriate conversion from linear UI input x to level is: 2193 * x == 0 -> level = 0 2194 * 0 < x <= R -> level = 10^(72*(x-R)/20/R) 2195 * 2196 * @param level linear send level 2197 * @return error code or success, see {@link #SUCCESS}, 2198 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR} 2199 */ 2200 public int setAuxEffectSendLevel(float level) { 2201 if (isRestricted()) { 2202 return SUCCESS; 2203 } 2204 if (mState == STATE_UNINITIALIZED) { 2205 return ERROR_INVALID_OPERATION; 2206 } 2207 level = clampGainOrLevel(level); 2208 int err = native_setAuxEffectSendLevel(level); 2209 return err == 0 ? SUCCESS : ERROR; 2210 } 2211 2212 //-------------------------------------------------------------------------- 2213 // Explicit Routing 2214 //-------------------- 2215 private AudioDeviceInfo mPreferredDevice = null; 2216 2217 /** 2218 * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route 2219 * the output from this AudioTrack. 2220 * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink. 2221 * If deviceInfo is null, default routing is restored. 2222 * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and 2223 * does not correspond to a valid audio output device. 2224 */ 2225 public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) { 2226 // Do some validation.... 2227 if (deviceInfo != null && !deviceInfo.isSink()) { 2228 return false; 2229 } 2230 int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0; 2231 boolean status = native_setOutputDevice(preferredDeviceId); 2232 if (status == true) { 2233 synchronized (this) { 2234 mPreferredDevice = deviceInfo; 2235 } 2236 } 2237 return status; 2238 } 2239 2240 /** 2241 * Returns the selected output specified by {@link #setPreferredDevice}. 
Note that this 2242 * is not guaranteed to correspond to the actual device being used for playback. 2243 */ 2244 public AudioDeviceInfo getPreferredDevice() { 2245 synchronized (this) { 2246 return mPreferredDevice; 2247 } 2248 } 2249 2250 //-------------------------------------------------------------------------- 2251 // (Re)Routing Info 2252 //-------------------- 2253 /** 2254 * Defines the interface by which applications can receive notifications of routing 2255 * changes for the associated {@link AudioTrack}. 2256 */ 2257 public interface OnRoutingChangedListener { 2258 /** 2259 * Called when the routing of an AudioTrack changes from either and explicit or 2260 * policy rerouting. Use {@link #getRoutedDevice()} to retrieve the newly routed-to 2261 * device. 2262 */ 2263 public void onRoutingChanged(AudioTrack audioTrack); 2264 } 2265 2266 /** 2267 * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack. 2268 * Note: The query is only valid if the AudioTrack is currently playing. If it is not, 2269 * <code>getRoutedDevice()</code> will return null. 2270 */ 2271 public AudioDeviceInfo getRoutedDevice() { 2272 int deviceId = native_getRoutedDeviceId(); 2273 if (deviceId == 0) { 2274 return null; 2275 } 2276 AudioDeviceInfo[] devices = 2277 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS); 2278 for (int i = 0; i < devices.length; i++) { 2279 if (devices[i].getId() == deviceId) { 2280 return devices[i]; 2281 } 2282 } 2283 return null; 2284 } 2285 2286 /** 2287 * The list of AudioTrack.OnRoutingChangedListener interfaces added (with 2288 * {@link AudioTrack#addOnRoutingChangedListener(OnRoutingChangedListener, android.os.Handler)} 2289 * by an app to receive (re)routing notifications. 
2290 */ 2291 private ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate> 2292 mRoutingChangeListeners = 2293 new ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate>(); 2294 2295 /** 2296 * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes 2297 * on this AudioTrack. 2298 * @param listener The {@link OnRoutingChangedListener} interface to receive notifications 2299 * of rerouting events. 2300 * @param handler Specifies the {@link Handler} object for the thread on which to execute 2301 * the callback. If <code>null</code>, the {@link Handler} associated with the main 2302 * {@link Looper} will be used. 2303 */ 2304 public void addOnRoutingChangedListener(OnRoutingChangedListener listener, 2305 android.os.Handler handler) { 2306 if (listener != null && !mRoutingChangeListeners.containsKey(listener)) { 2307 synchronized (mRoutingChangeListeners) { 2308 if (mRoutingChangeListeners.size() == 0) { 2309 native_enableDeviceCallback(); 2310 } 2311 mRoutingChangeListeners.put( 2312 listener, new NativeRoutingEventHandlerDelegate(this, listener, 2313 handler != null ? handler : new Handler(mInitializationLooper))); 2314 } 2315 } 2316 } 2317 2318 /** 2319 * Removes an {@link OnRoutingChangedListener} which has been previously added 2320 * to receive rerouting notifications. 2321 * @param listener The previously added {@link OnRoutingChangedListener} interface to remove. 2322 */ 2323 public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) { 2324 synchronized (mRoutingChangeListeners) { 2325 if (mRoutingChangeListeners.containsKey(listener)) { 2326 mRoutingChangeListeners.remove(listener); 2327 } 2328 if (mRoutingChangeListeners.size() == 0) { 2329 native_disableDeviceCallback(); 2330 } 2331 } 2332 } 2333 2334 /** 2335 * Sends device list change notification to all listeners. 
2336 */ 2337 private void broadcastRoutingChange() { 2338 Collection<NativeRoutingEventHandlerDelegate> values; 2339 synchronized (mRoutingChangeListeners) { 2340 values = mRoutingChangeListeners.values(); 2341 } 2342 AudioManager.resetAudioPortGeneration(); 2343 for(NativeRoutingEventHandlerDelegate delegate : values) { 2344 Handler handler = delegate.getHandler(); 2345 if (handler != null) { 2346 handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE); 2347 } 2348 } 2349 } 2350 2351 //--------------------------------------------------------- 2352 // Interface definitions 2353 //-------------------- 2354 /** 2355 * Interface definition for a callback to be invoked when the playback head position of 2356 * an AudioTrack has reached a notification marker or has increased by a certain period. 2357 */ 2358 public interface OnPlaybackPositionUpdateListener { 2359 /** 2360 * Called on the listener to notify it that the previously set marker has been reached 2361 * by the playback head. 2362 */ 2363 void onMarkerReached(AudioTrack track); 2364 2365 /** 2366 * Called on the listener to periodically notify it that the playback head has reached 2367 * a multiple of the notification period. 
2368 */ 2369 void onPeriodicNotification(AudioTrack track); 2370 } 2371 2372 //--------------------------------------------------------- 2373 // Inner classes 2374 //-------------------- 2375 /** 2376 * Helper class to handle the forwarding of native events to the appropriate listener 2377 * (potentially) handled in a different thread 2378 */ 2379 private class NativePositionEventHandlerDelegate { 2380 private final Handler mHandler; 2381 2382 NativePositionEventHandlerDelegate(final AudioTrack track, 2383 final OnPlaybackPositionUpdateListener listener, 2384 Handler handler) { 2385 // find the looper for our new event handler 2386 Looper looper; 2387 if (handler != null) { 2388 looper = handler.getLooper(); 2389 } else { 2390 // no given handler, use the looper the AudioTrack was created in 2391 looper = mInitializationLooper; 2392 } 2393 2394 // construct the event handler with this looper 2395 if (looper != null) { 2396 // implement the event handler delegate 2397 mHandler = new Handler(looper) { 2398 @Override 2399 public void handleMessage(Message msg) { 2400 if (track == null) { 2401 return; 2402 } 2403 switch(msg.what) { 2404 case NATIVE_EVENT_MARKER: 2405 if (listener != null) { 2406 listener.onMarkerReached(track); 2407 } 2408 break; 2409 case NATIVE_EVENT_NEW_POS: 2410 if (listener != null) { 2411 listener.onPeriodicNotification(track); 2412 } 2413 break; 2414 default: 2415 loge("Unknown native event type: " + msg.what); 2416 break; 2417 } 2418 } 2419 }; 2420 } else { 2421 mHandler = null; 2422 } 2423 } 2424 2425 Handler getHandler() { 2426 return mHandler; 2427 } 2428 } 2429 2430 /** 2431 * Helper class to handle the forwarding of native events to the appropriate listener 2432 * (potentially) handled in a different thread 2433 */ 2434 private class NativeRoutingEventHandlerDelegate { 2435 private final Handler mHandler; 2436 2437 NativeRoutingEventHandlerDelegate(final AudioTrack track, 2438 final OnRoutingChangedListener listener, 2439 Handler handler) 
{ 2440 // find the looper for our new event handler 2441 Looper looper; 2442 if (handler != null) { 2443 looper = handler.getLooper(); 2444 } else { 2445 // no given handler, use the looper the AudioTrack was created in 2446 looper = mInitializationLooper; 2447 } 2448 2449 // construct the event handler with this looper 2450 if (looper != null) { 2451 // implement the event handler delegate 2452 mHandler = new Handler(looper) { 2453 @Override 2454 public void handleMessage(Message msg) { 2455 if (track == null) { 2456 return; 2457 } 2458 switch(msg.what) { 2459 case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE: 2460 if (listener != null) { 2461 listener.onRoutingChanged(track); 2462 } 2463 break; 2464 default: 2465 loge("Unknown native event type: " + msg.what); 2466 break; 2467 } 2468 } 2469 }; 2470 } else { 2471 mHandler = null; 2472 } 2473 } 2474 2475 Handler getHandler() { 2476 return mHandler; 2477 } 2478 } 2479 2480 //--------------------------------------------------------- 2481 // Java methods called from the native side 2482 //-------------------- 2483 @SuppressWarnings("unused") 2484 private static void postEventFromNative(Object audiotrack_ref, 2485 int what, int arg1, int arg2, Object obj) { 2486 //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2); 2487 AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get(); 2488 if (track == null) { 2489 return; 2490 } 2491 2492 if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) { 2493 track.broadcastRoutingChange(); 2494 return; 2495 } 2496 NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate; 2497 if (delegate != null) { 2498 Handler handler = delegate.getHandler(); 2499 if (handler != null) { 2500 Message m = handler.obtainMessage(what, arg1, arg2, obj); 2501 handler.sendMessage(m); 2502 } 2503 } 2504 } 2505 2506 2507 //--------------------------------------------------------- 2508 // Native methods called from the Java side 2509 //-------------------- 

    // ---- Lifecycle ----
    // post-condition: mStreamType is overwritten with a value
    // that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    // AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    private native final void native_finalize();

    private native final void native_release();

    // ---- Transport control ----
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // ---- Data writes; "isBlocking" selects blocking vs. non-blocking semantics ----
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    // ---- Volume / playback parameters ----
    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // ---- Position: markers, update period, playback head ----
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // ---- Static queries (no track instance required) ----
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // ---- Auxiliary effects ----
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // ---- Routing / device callbacks ----
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();
    // FIXME static private native int native_get_FCC_8();

    //---------------------------------------------------------
    // Utility methods
    //------------------

    // Debug-level log helper using the class tag.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Error-level log helper using the class tag.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
}