// AudioTrack.java revision 5f68c7a3f8400bcef233bf02d9a722f6d21f5c34
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.media;

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.ref.WeakReference;
import java.lang.Math;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.NioUtils;
import java.util.Collection;

import android.annotation.IntDef;
import android.annotation.NonNull;
import android.annotation.SystemApi;
import android.app.ActivityThread;
import android.app.AppOpsManager;
import android.content.Context;
import android.os.Handler;
import android.os.IBinder;
import android.os.Looper;
import android.os.Message;
import android.os.Process;
import android.os.RemoteException;
import android.os.ServiceManager;
import android.util.ArrayMap;
import android.util.Log;

import com.android.internal.app.IAppOpsService;


/**
 * The AudioTrack class manages and plays a single audio resource for Java applications.
 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
 * achieved by "pushing" the data to the AudioTrack object using one of the
 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
 * and {@link #write(float[], int, int, int)} methods.
 *
 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
 * one of the {@code write()} methods. These are blocking and return when the data has been
 * transferred from the Java layer to the native layer and queued for playback. The streaming
 * mode is most useful when playing blocks of audio data that for instance are:
 *
 * <ul>
 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
 *   <li>too big to fit in memory because of the characteristics of the audio data
 *         (high sampling rate, bits per sample ...)</li>
 *   <li>received or generated while previously queued audio is playing.</li>
 * </ul>
 *
 * The static mode should be chosen when dealing with short sounds that fit in memory and
 * that need to be played with the smallest latency possible. The static mode will
 * therefore be preferred for UI and game sounds that are played often, and with the
 * smallest overhead possible.
 *
 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
 * The size of this buffer, specified during the construction, determines how long an AudioTrack
 * can play before running out of data.<br>
 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
 * be played from it.<br>
 * For the streaming mode, data will be written to the audio sink in chunks of
 * sizes less than or equal to the total buffer size.
 *
 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
 */
public class AudioTrack
{
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    // NOTE(review): GAIN_MIN/GAIN_MAX appear to bound the linear gains accepted by the
    // volume/aux-effect APIs — confirm against the gain-clamping code (not visible in this chunk).
    private static final float GAIN_MAX = 1.0f;

    /** Minimum value for sample rate */
    private static final int SAMPLE_RATE_HZ_MIN = 4000;
    /** Maximum value for sample rate */
    private static final int SAMPLE_RATE_HZ_MAX = 192000;

    /** Maximum value for AudioTrack channel count */
    private static final int CHANNEL_COUNT_MAX = 8;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public static final int SUCCESS = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public static final int ERROR = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public static final int ERROR_BAD_VALUE = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public static final int ERROR_INVALID_OPERATION = AudioSystem.INVALID_OPERATION;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used in {@link #write(ByteBuffer, int, int)}
     */
    public final static int WRITE_BLOCKING = 0;
    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking, to be used in
     * {@link #write(ByteBuffer, int, int)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to make sure mPlayState updates are reflecting the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current native audio buffer frame count use {@link #getNativeFrameCount()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     */
    private int mSampleRate; // initialized by all constructors
    /**
     * The number of audio output channels (1 is mono, 2 is stereo).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack.
     * Distinct from mChannelConfiguration: this is the normalized position mask actually
     * passed to the native layer (see audioParamCheck).
     */
    private int mChannels = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     * {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     * {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    private final AudioAttributes mAttributes;
    /**
     * The way audio is consumed by the audio sink, streaming or static.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The current audio channel index configuration (if specified).
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
    /**
     * Audio session ID
     */
    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
    /**
     * Reference to the app-ops service.
     */
    private final IAppOpsService mAppOps;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    private long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;


    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   natural latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is automatically increased to the minimum
     *   buffer size.
     *   The method {@link #getNativeFrameCount()} returns the
     *   actual size in frames of the native buffer created, which
     *   determines the frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegate to the session-aware constructor, letting the system allocate a new session.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
    }

    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session with all other media players or audio tracks in the same session, otherwise a new
     * session will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT}, {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Wrap the legacy stream-type parameters into AudioAttributes/AudioFormat and delegate.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
    }

    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *     from for playback. If using the AudioTrack in streaming mode, you can write data into
     *     this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *     this is the maximum size of the sound that will be played for this instance.
     *     See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer
     *     size for the successful creation of an AudioTrack instance in streaming mode. Using
     *     values smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes");
        }
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // Fill in each unset AudioFormat property with a device default.
        int rate = 0;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
        {
            rate = format.getSampleRate();
        } else {
            rate = AudioSystem.getPrimaryOutputSamplingRate();
            if (rate <= 0) {
                // last-resort fallback when the primary output rate cannot be queried
                rate = 44100;
            }
        }
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates and stores mSampleRate/mChannels/mChannelIndexMask/mAudioFormat/mDataLoadMode.
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;
        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        mAttributes = (new AudioAttributes.Builder(attributes).build());

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // session[] is an in/out parameter: native_setup reads the requested session ID and
        // writes back the session actually assigned (e.g. when a new one is allocated).
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                mSampleRate, mChannels, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSessionId = session[0];

        if (mDataLoadMode == MODE_STATIC) {
            // a static track is not usable until its data has been written
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }

    /**
     * Builder class for {@link AudioTrack} objects.
     * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
     * attributes and audio format parameters, you indicate which of those vary from the default
     * behavior on the device.
     * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
     * parameters, to be used by a new <code>AudioTrack</code> instance:
     *
     * <pre class="prettyprint">
     * AudioTrack player = new AudioTrack.Builder()
     *         .setAudioAttributes(new AudioAttributes.Builder()
     *                  .setUsage(AudioAttributes.USAGE_ALARM)
     *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
     *                  .build())
     *         .setAudioFormat(new AudioFormat.Builder()
     *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     *                 .setSampleRate(44100)
     *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
     *                 .build())
     *         .setBufferSizeInBytes(minBuffSize)
     *         .build();
     * </pre>
     * <p>
     * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
     * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
     * <br>If the audio format is not specified or is incomplete, its sample rate will be the
     * default output sample rate of the device (see
     * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be
     * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
     * {@link AudioFormat#ENCODING_PCM_16BIT}.
     * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
     * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
     * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
     * <code>MODE_STREAM</code> will be used.
     * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
     * be generated.
     */
    public static class Builder {
        private AudioAttributes mAttributes;
        private AudioFormat mFormat;
        private int mBufferSizeInBytes;
        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM;

        /**
         * Constructs a new Builder with the default values as described above.
         */
        public Builder() {
        }

        /**
         * Sets the {@link AudioAttributes}.
         * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
         *     data to be played.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
                throws IllegalArgumentException {
            if (attributes == null) {
                throw new IllegalArgumentException("Illegal null AudioAttributes argument");
            }
            // keep reference, we only copy the data when building
            mAttributes = attributes;
            return this;
        }

        /**
         * Sets the format of the audio data to be played by the {@link AudioTrack}.
         * See {@link AudioFormat.Builder} for configuring the audio format parameters such
         * as encoding, channel mask and sample rate.
         * @param format a non-null {@link AudioFormat} instance.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
                throws IllegalArgumentException {
            if (format == null) {
                throw new IllegalArgumentException("Illegal null AudioFormat argument");
            }
            // keep reference, we only copy the data when building
            mFormat = format;
            return this;
        }

        /**
         * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
         * If using the {@link AudioTrack} in streaming mode
         * (see {@link AudioTrack#MODE_STREAM}), you can write data into this buffer in smaller
         * chunks than this size. See {@link AudioTrack#getMinBufferSize(int, int, int)} to
         * determine the minimum required buffer size for the successful creation of an AudioTrack
         * instance in streaming mode. Using values smaller than
         * <code>getMinBufferSize()</code> will result
         * in an exception when trying to build the <code>AudioTrack</code>.
         * <br>If using the <code>AudioTrack</code> in static mode (see
         * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
         * played by this instance.
         * @param bufferSizeInBytes
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
                throws IllegalArgumentException {
            if (bufferSizeInBytes <= 0) {
                throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
            }
            mBufferSizeInBytes = bufferSizeInBytes;
            return this;
        }

        /**
         * Sets the mode under which buffers of audio data are transferred from the
         * {@link AudioTrack} to the framework.
         * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setTransferMode(@TransferMode int mode)
                throws IllegalArgumentException {
            switch(mode) {
                case MODE_STREAM:
                case MODE_STATIC:
                    mMode = mode;
                    break;
                default:
                    throw new IllegalArgumentException("Invalid transfer mode " + mode);
            }
            return this;
        }

        /**
         * Sets the session ID the {@link AudioTrack} will be attached to.
         * @param sessionId a strictly positive ID number retrieved from another
         *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
         *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
         *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setSessionId(int sessionId)
                throws IllegalArgumentException {
            if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
                throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
            }
            mSessionId = sessionId;
            return this;
        }

        /**
         * Builds an {@link AudioTrack} instance initialized with all the parameters set
         * on this <code>Builder</code>.
         * @return a new {@link AudioTrack} instance.
         * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
         *     were incompatible, or if they are not supported by the device.
         */
        public @NonNull AudioTrack build() throws UnsupportedOperationException {
            if (mAttributes == null) {
                mAttributes = new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .build();
            }
            if (mFormat == null) {
                mFormat = new AudioFormat.Builder()
                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                        .setSampleRate(AudioSystem.getPrimaryOutputSamplingRate())
                        .setEncoding(AudioFormat.ENCODING_DEFAULT)
                        .build();
            }
            try {
                // If the buffer size is not specified in streaming mode,
                // use a single frame for the buffer size and let the
                // native code figure out the minimum buffer size.
                if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
                    mBufferSizeInBytes = mFormat.getChannelCount()
                            * mFormat.getBytesPerSample(mFormat.getEncoding());
                }
                return new AudioTrack(mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId);
            } catch (IllegalArgumentException e) {
                // NOTE(review): the original exception is dropped here (only its message is kept);
                // consider chaining the cause for easier debugging.
                throw new UnsupportedOperationException(e.getMessage());
            }
        }
    }

    // mask of all the channels supported by this implementation
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;

    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannels is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
            int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        if (sampleRateInHz < SAMPLE_RATE_HZ_MIN || sampleRateInHz > SAMPLE_RATE_HZ_MAX) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannels = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannels = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                // index mask is the only channel specification
                mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // a position mask and an index mask may coexist, but must agree on channel count
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isValidEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // non-linear-PCM (compressed) encodings are only accepted in streaming mode
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }

    /**
     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
     * @param channelConfig the mask to validate
     * @return false if the AudioTrack can't be used with such a mask
     */
    private static boolean isMultichannelConfigSupported(int channelConfig) {
        // check for unsupported channels
        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
            loge("Channel configuration features unsupported channels");
            return false;
        }
        final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        if (channelCount > CHANNEL_COUNT_MAX) {
            loge("Channel configuration contains too many channels " +
                    channelCount + ">" + CHANNEL_COUNT_MAX);
            return false;
        }
        // check for unsupported multichannel combinations:
        // - FL/FR must be present
        // - L/R channels must be paired (e.g. no single L channel)
        final int frontPair =
                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        if ((channelConfig & frontPair) != frontPair) {
            loge("Front channels must be present in multichannel configurations");
            return false;
        }
        final int backPair =
                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
        if ((channelConfig & backPair) != 0) {
            if ((channelConfig & backPair) != backPair) {
                loge("Rear channels can't be used independently");
                return false;
            }
        }
        final int sidePair =
                AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
        if ((channelConfig & sidePair) != 0
                && (channelConfig & sidePair) != sidePair) {
            loge("Side channels can't be used independently");
            return false;
        }
        return true;
    }


    // Convenience method for the constructor's audio buffer size check.
827 // preconditions: 828 // mChannelCount is valid 829 // mAudioFormat is valid 830 // postcondition: 831 // mNativeBufferSizeInBytes is valid (multiple of frame size, positive) 832 private void audioBuffSizeCheck(int audioBufferSize) { 833 // NB: this section is only valid with PCM data. 834 // To update when supporting compressed formats 835 int frameSizeInBytes; 836 if (AudioFormat.isEncodingLinearPcm(mAudioFormat)) { 837 frameSizeInBytes = mChannelCount 838 * (AudioFormat.getBytesPerSample(mAudioFormat)); 839 } else { 840 frameSizeInBytes = 1; 841 } 842 if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) { 843 throw new IllegalArgumentException("Invalid audio buffer size."); 844 } 845 846 mNativeBufferSizeInBytes = audioBufferSize; 847 mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes; 848 } 849 850 851 /** 852 * Releases the native AudioTrack resources. 853 */ 854 public void release() { 855 // even though native_release() stops the native AudioTrack, we need to stop 856 // AudioTrack subclasses too. 857 try { 858 stop(); 859 } catch(IllegalStateException ise) { 860 // don't raise an exception, we're releasing the resources. 861 } 862 native_release(); 863 mState = STATE_UNINITIALIZED; 864 } 865 866 @Override 867 protected void finalize() { 868 native_finalize(); 869 } 870 871 //-------------------------------------------------------------------------- 872 // Getters 873 //-------------------- 874 /** 875 * Returns the minimum gain value, which is the constant 0.0. 876 * Gain values less than 0.0 will be clamped to 0.0. 877 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 878 * @return the minimum value, which is the constant 0.0. 879 */ 880 static public float getMinVolume() { 881 return GAIN_MIN; 882 } 883 884 /** 885 * Returns the maximum gain value, which is greater than or equal to 1.0. 886 * Gain values greater than the maximum will be clamped to the maximum. 
887 * <p>The word "volume" in the API name is historical; this is actually a gain. 888 * expressed as a linear multiplier on sample values, where a maximum value of 1.0 889 * corresponds to a gain of 0 dB (sample values left unmodified). 890 * @return the maximum value, which is greater than or equal to 1.0. 891 */ 892 static public float getMaxVolume() { 893 return GAIN_MAX; 894 } 895 896 /** 897 * Returns the configured audio data sample rate in Hz 898 */ 899 public int getSampleRate() { 900 return mSampleRate; 901 } 902 903 /** 904 * Returns the current playback sample rate rate in Hz. 905 */ 906 public int getPlaybackRate() { 907 return native_get_playback_rate(); 908 } 909 910 /** 911 * Returns the current playback parameters. 912 * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters 913 * @return current {@link PlaybackParams}. 914 * @throws IllegalStateException if track is not initialized. 915 */ 916 public @NonNull PlaybackParams getPlaybackParams() { 917 return native_get_playback_params(); 918 } 919 920 /** 921 * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT}, 922 * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}. 923 */ 924 public int getAudioFormat() { 925 return mAudioFormat; 926 } 927 928 /** 929 * Returns the type of audio stream this AudioTrack is configured for. 930 * Compare the result against {@link AudioManager#STREAM_VOICE_CALL}, 931 * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING}, 932 * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM}, 933 * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}. 934 */ 935 public int getStreamType() { 936 return mStreamType; 937 } 938 939 /** 940 * Returns the configured channel position mask. 941 * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO}, 942 * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}. 
943 * This method may return {@link AudioFormat#CHANNEL_INVALID} if 944 * a channel index mask is used. Consider 945 * {@link #getFormat()} instead, to obtain an {@link AudioFormat}, 946 * which contains both the channel position mask and the channel index mask. 947 */ 948 public int getChannelConfiguration() { 949 return mChannelConfiguration; 950 } 951 952 /** 953 * Returns the configured <code>AudioTrack</code> format. 954 * @return an {@link AudioFormat} containing the 955 * <code>AudioTrack</code> parameters at the time of configuration. 956 */ 957 public @NonNull AudioFormat getFormat() { 958 AudioFormat.Builder builder = new AudioFormat.Builder() 959 .setSampleRate(mSampleRate) 960 .setEncoding(mAudioFormat); 961 if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) { 962 builder.setChannelMask(mChannelConfiguration); 963 } 964 if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) { 965 builder.setChannelIndexMask(mChannelIndexMask); 966 } 967 return builder.build(); 968 } 969 970 /** 971 * Returns the configured number of channels. 972 */ 973 public int getChannelCount() { 974 return mChannelCount; 975 } 976 977 /** 978 * Returns the state of the AudioTrack instance. This is useful after the 979 * AudioTrack instance has been created to check if it was initialized 980 * properly. This ensures that the appropriate resources have been acquired. 981 * @see #STATE_INITIALIZED 982 * @see #STATE_NO_STATIC_DATA 983 * @see #STATE_UNINITIALIZED 984 */ 985 public int getState() { 986 return mState; 987 } 988 989 /** 990 * Returns the playback state of the AudioTrack instance. 991 * @see #PLAYSTATE_STOPPED 992 * @see #PLAYSTATE_PAUSED 993 * @see #PLAYSTATE_PLAYING 994 */ 995 public int getPlayState() { 996 synchronized (mPlayStateLock) { 997 return mPlayState; 998 } 999 } 1000 1001 /** 1002 * Returns the "native frame count" of the <code>AudioTrack</code> buffer. 
 * <p> If the track's creation mode is {@link #MODE_STATIC},
 * it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
 * A static track's native frame count will not change.
 * <p> If the track's creation mode is {@link #MODE_STREAM},
 * it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
 * For streaming tracks, this value may be rounded up to a larger value if needed by
 * the target output sink, and
 * if the track is subsequently routed to a different output sink, the native
 * frame count may enlarge to accommodate.
 * See also {@link AudioManager#getProperty(String)} for key
 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
 * @return current size in frames of the audio track buffer.
 * @throws IllegalStateException
 */
public int getNativeFrameCount() throws IllegalStateException {
    return native_get_native_frame_count();
}

/**
 * Returns marker position expressed in frames.
 * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
 * or zero if marker is disabled.
 */
public int getNotificationMarkerPosition() {
    return native_get_marker_pos();
}

/**
 * Returns the notification update period expressed in frames.
 * Zero means that no position update notifications are being delivered.
 */
public int getPositionNotificationPeriod() {
    return native_get_pos_update_period();
}

/**
 * Returns the playback head position expressed in frames.
 * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
 * unsigned 32-bits. That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
 * This is a continuously advancing counter.  It will wrap (overflow) periodically,
 * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
 * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
 * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
 * the total number of frames played since reset,
 * <i>not</i> the current offset within the buffer.
 */
public int getPlaybackHeadPosition() {
    return native_get_position();
}

/**
 * Returns this track's estimated latency in milliseconds. This includes the latency due
 * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
 *
 * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
 * a better solution.
 * @hide
 */
public int getLatency() {
    return native_get_latency();
}

/**
 * Returns the output sample rate in Hz for the specified stream type.
 */
static public int getNativeOutputSampleRate(int streamType) {
    return native_get_output_sample_rate(streamType);
}

/**
 * Returns the minimum buffer size required for the successful creation of an AudioTrack
 * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
 * guarantee a smooth playback under load, and higher values should be chosen according to
 * the expected frequency at which the buffer will be refilled with additional data to play.
 * For example, if you intend to dynamically set the source sample rate of an AudioTrack
 * to a higher value than the initial source sample rate, be sure to configure the buffer size
 * based on the highest planned sample rate.
 * @param sampleRateInHz the source sample rate expressed in Hz.
 * @param channelConfig describes the configuration of the audio channels.
 *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
 *   {@link AudioFormat#CHANNEL_OUT_STEREO}
 * @param audioFormat the format in which the audio data is represented.
 *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
 *   {@link AudioFormat#ENCODING_PCM_8BIT},
 *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
 *   or {@link #ERROR} if unable to query for output properties,
 *   or the minimum buffer size expressed in bytes.
 */
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
    int channelCount = 0;
    switch(channelConfig) {
    case AudioFormat.CHANNEL_OUT_MONO:
    case AudioFormat.CHANNEL_CONFIGURATION_MONO:
        channelCount = 1;
        break;
    case AudioFormat.CHANNEL_OUT_STEREO:
    case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
        channelCount = 2;
        break;
    default:
        // multichannel masks go through the same validation as the constructor
        if (!isMultichannelConfigSupported(channelConfig)) {
            loge("getMinBufferSize(): Invalid channel configuration.");
            return ERROR_BAD_VALUE;
        } else {
            channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
    }

    if (!AudioFormat.isValidEncoding(audioFormat)) {
        loge("getMinBufferSize(): Invalid audio format.");
        return ERROR_BAD_VALUE;
    }

    // sample rate, note these values are subject to change
    if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
        loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
        return ERROR_BAD_VALUE;
    }

    int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
    if (size <= 0) {
        // the native layer could not determine a size (e.g. no output available)
        loge("getMinBufferSize(): error querying hardware");
        return ERROR;
    }
    else {
        return size;
    }
}

/**
 * Returns the audio session ID.
1135 * 1136 * @return the ID of the audio session this AudioTrack belongs to. 1137 */ 1138 public int getAudioSessionId() { 1139 return mSessionId; 1140 } 1141 1142 /** 1143 * Poll for a timestamp on demand. 1144 * <p> 1145 * If you need to track timestamps during initial warmup or after a routing or mode change, 1146 * you should request a new timestamp once per second until the reported timestamps 1147 * show that the audio clock is stable. 1148 * Thereafter, query for a new timestamp approximately once every 10 seconds to once per minute. 1149 * Calling this method more often is inefficient. 1150 * It is also counter-productive to call this method more often than recommended, 1151 * because the short-term differences between successive timestamp reports are not meaningful. 1152 * If you need a high-resolution mapping between frame position and presentation time, 1153 * consider implementing that at application level, based on low-resolution timestamps. 1154 * <p> 1155 * The audio data at the returned position may either already have been 1156 * presented, or may have not yet been presented but is committed to be presented. 1157 * It is not possible to request the time corresponding to a particular position, 1158 * or to request the (fractional) position corresponding to a particular time. 1159 * If you need such features, consider implementing them at application level. 1160 * 1161 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1162 * and owned by caller. 1163 * @return true if a timestamp is available, or false if no timestamp is available. 1164 * If a timestamp if available, 1165 * the AudioTimestamp instance is filled in with a position in frame units, together 1166 * with the estimated time when that frame was presented or is committed to 1167 * be presented. 1168 * In the case that no timestamp is available, any supplied instance is left unaltered. 
1169 * A timestamp may be temporarily unavailable while the audio clock is stabilizing, 1170 * or during and immediately after a route change. 1171 */ 1172 // Add this text when the "on new timestamp" API is added: 1173 // Use if you need to get the most recent timestamp outside of the event callback handler. 1174 public boolean getTimestamp(AudioTimestamp timestamp) 1175 { 1176 if (timestamp == null) { 1177 throw new IllegalArgumentException(); 1178 } 1179 // It's unfortunate, but we have to either create garbage every time or use synchronized 1180 long[] longArray = new long[2]; 1181 int ret = native_get_timestamp(longArray); 1182 if (ret != SUCCESS) { 1183 return false; 1184 } 1185 timestamp.framePosition = longArray[0]; 1186 timestamp.nanoTime = longArray[1]; 1187 return true; 1188 } 1189 1190 1191 //-------------------------------------------------------------------------- 1192 // Initialization / configuration 1193 //-------------------- 1194 /** 1195 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1196 * for each periodic playback head position update. 1197 * Notifications will be received in the same thread as the one in which the AudioTrack 1198 * instance was created. 1199 * @param listener 1200 */ 1201 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) { 1202 setPlaybackPositionUpdateListener(listener, null); 1203 } 1204 1205 /** 1206 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1207 * for each periodic playback head position update. 1208 * Use this method to receive AudioTrack events in the Handler associated with another 1209 * thread than the one in which you created the AudioTrack instance. 1210 * @param listener 1211 * @param handler the Handler that will receive the event notification messages. 
1212 */ 1213 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, 1214 Handler handler) { 1215 if (listener != null) { 1216 mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler); 1217 } else { 1218 mEventHandlerDelegate = null; 1219 } 1220 } 1221 1222 1223 private static float clampGainOrLevel(float gainOrLevel) { 1224 if (Float.isNaN(gainOrLevel)) { 1225 throw new IllegalArgumentException(); 1226 } 1227 if (gainOrLevel < GAIN_MIN) { 1228 gainOrLevel = GAIN_MIN; 1229 } else if (gainOrLevel > GAIN_MAX) { 1230 gainOrLevel = GAIN_MAX; 1231 } 1232 return gainOrLevel; 1233 } 1234 1235 1236 /** 1237 * Sets the specified left and right output gain values on the AudioTrack. 1238 * <p>Gain values are clamped to the closed interval [0.0, max] where 1239 * max is the value of {@link #getMaxVolume}. 1240 * A value of 0.0 results in zero gain (silence), and 1241 * a value of 1.0 means unity gain (signal unchanged). 1242 * The default value is 1.0 meaning unity gain. 1243 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1244 * @param leftGain output gain for the left channel. 1245 * @param rightGain output gain for the right channel 1246 * @return error code or success, see {@link #SUCCESS}, 1247 * {@link #ERROR_INVALID_OPERATION} 1248 * @deprecated Applications should use {@link #setVolume} instead, as it 1249 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 1250 */ 1251 public int setStereoVolume(float leftGain, float rightGain) { 1252 if (isRestricted()) { 1253 return SUCCESS; 1254 } 1255 if (mState == STATE_UNINITIALIZED) { 1256 return ERROR_INVALID_OPERATION; 1257 } 1258 1259 leftGain = clampGainOrLevel(leftGain); 1260 rightGain = clampGainOrLevel(rightGain); 1261 1262 native_setVolume(leftGain, rightGain); 1263 1264 return SUCCESS; 1265 } 1266 1267 1268 /** 1269 * Sets the specified output gain value on all channels of this track. 
 * <p>Gain values are clamped to the closed interval [0.0, max] where
 * max is the value of {@link #getMaxVolume}.
 * A value of 0.0 results in zero gain (silence), and
 * a value of 1.0 means unity gain (signal unchanged).
 * The default value is 1.0 meaning unity gain.
 * <p>This API is preferred over {@link #setStereoVolume}, as it
 * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
 * <p>The word "volume" in the API name is historical; this is actually a linear gain.
 * @param gain output gain for all channels.
 * @return error code or success, see {@link #SUCCESS},
 *    {@link #ERROR_INVALID_OPERATION}
 */
public int setVolume(float gain) {
    // delegates to the stereo variant with the same gain on both channels
    return setStereoVolume(gain, gain);
}


/**
 * Sets the playback sample rate for this track. This sets the sampling rate at which
 * the audio data will be consumed and played back
 * (as set by the sampleRateInHz parameter in the
 * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
 * not the original sampling rate of the
 * content. For example, setting it to half the sample rate of the content will cause the
 * playback to last twice as long, but will also result in a pitch shift down by one octave.
 * The valid sample rate range is from 1 Hz to twice the value returned by
 * {@link #getNativeOutputSampleRate(int)}.
 * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
 * @param sampleRateInHz the sample rate expressed in Hz
 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
 *    {@link #ERROR_INVALID_OPERATION}
 */
public int setPlaybackRate(int sampleRateInHz) {
    if (mState != STATE_INITIALIZED) {
        return ERROR_INVALID_OPERATION;
    }
    if (sampleRateInHz <= 0) {
        return ERROR_BAD_VALUE;
    }
    // upper-bound validation is left to the native layer
    return native_set_playback_rate(sampleRateInHz);
}


/**
 * Sets the playback parameters.
 * This method returns failure if it cannot apply the playback parameters.
 * One possible cause is that the parameters for speed or pitch are out of range.
 * Another possible cause is that the <code>AudioTrack</code> is streaming
 * (see {@link #MODE_STREAM}) and the
 * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
 * on configuration must be larger than the speed multiplied by the minimum size
 * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
 * @param params see {@link PlaybackParams}. In particular,
 * speed, pitch, and audio mode should be set.
 * @throws IllegalArgumentException if the parameters are invalid or not accepted.
 * @throws IllegalStateException if track is not initialized.
 */
public void setPlaybackParams(@NonNull PlaybackParams params) {
    if (params == null) {
        throw new IllegalArgumentException("params is null");
    }
    native_set_playback_params(params);
}


/**
 * Sets the position of the notification marker.  At most one marker can be active.
 * @param markerInFrames marker position in wrapping frame units similar to
 * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
 * To set a marker at a position which would appear as zero due to wraparound,
 * a workaround is to use a non-zero position near zero, such as -1 or 1.
 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
 *  {@link #ERROR_INVALID_OPERATION}
 */
public int setNotificationMarkerPosition(int markerInFrames) {
    if (mState == STATE_UNINITIALIZED) {
        return ERROR_INVALID_OPERATION;
    }
    return native_set_marker_pos(markerInFrames);
}


/**
 * Sets the period for the periodic notification event.
 * @param periodInFrames update period expressed in frames.
 * Zero period means no position updates.  A negative period is not allowed.
 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
 */
public int setPositionNotificationPeriod(int periodInFrames) {
    if (mState == STATE_UNINITIALIZED) {
        return ERROR_INVALID_OPERATION;
    }
    return native_set_pos_update_period(periodInFrames);
}


/**
 * Sets the playback head position within the static buffer.
 * The track must be stopped or paused for the position to be changed,
 * and must use the {@link #MODE_STATIC} mode.
 * @param positionInFrames playback head position within buffer, expressed in frames.
 * Zero corresponds to start of buffer.
 * The position must not be greater than the buffer size in frames, or negative.
 * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
 * the position values have different meanings.
 * <br>
 * If looping is currently enabled and the new position is greater than or equal to the
 * loop end marker, the behavior varies by API level:
 * as of {@link android.os.Build.VERSION_CODES#MNC},
 * the looping is first disabled and then the position is set.
 * For earlier API levels, the behavior is unspecified.
 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
 *    {@link #ERROR_INVALID_OPERATION}
 */
public int setPlaybackHeadPosition(int positionInFrames) {
    // only valid on a stopped or paused static track
    if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
            getPlayState() == PLAYSTATE_PLAYING) {
        return ERROR_INVALID_OPERATION;
    }
    if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
        return ERROR_BAD_VALUE;
    }
    return native_set_position(positionInFrames);
}

/**
 * Sets the loop points and the loop count. The loop can be infinite.
 * Similarly to setPlaybackHeadPosition,
 * the track must be stopped or paused for the loop points to be changed,
 * and must use the {@link #MODE_STATIC} mode.
 * @param startInFrames loop start marker expressed in frames.
 * Zero corresponds to start of buffer.
 * The start marker must not be greater than or equal to the buffer size in frames, or negative.
 * @param endInFrames loop end marker expressed in frames.
 * The total buffer size in frames corresponds to end of buffer.
 * The end marker must not be greater than the buffer size in frames.
 * For looping, the end marker must not be less than or equal to the start marker,
 * but to disable looping
 * it is permitted for start marker, end marker, and loop count to all be 0.
 * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
 * If the loop period (endInFrames - startInFrames) is too small for the implementation to
 * support,
 * {@link #ERROR_BAD_VALUE} is returned.
 * The loop range is the interval [startInFrames, endInFrames).
 * <br>
 * As of {@link android.os.Build.VERSION_CODES#MNC}, the position is left unchanged,
 * unless it is greater than or equal to the loop end marker, in which case
 * it is forced to the loop start marker.
 * For earlier API levels, the effect on position is unspecified.
 * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
 *    A value of -1 means infinite looping, and 0 disables looping.
 *    A value of positive N means to "loop" (go back) N times.  For example,
 *    a value of one means to play the region two times in total.
 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
 *    {@link #ERROR_INVALID_OPERATION}
 */
public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
    // only valid on a stopped or paused static track
    if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
            getPlayState() == PLAYSTATE_PLAYING) {
        return ERROR_INVALID_OPERATION;
    }
    if (loopCount == 0) {
        ;   // explicitly allowed as an exception to the loop region range check
    } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
            startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
        return ERROR_BAD_VALUE;
    }
    return native_set_loop(startInFrames, endInFrames, loopCount);
}

/**
 * Sets the initialization state of the instance. This method was originally intended to be used
 * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
 * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
 * @param state the state of the AudioTrack instance
 * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
 */
@Deprecated
protected void setState(int state) {
    mState = state;
}


//---------------------------------------------------------
// Transport control methods
//--------------------
/**
 * Starts playing an AudioTrack.
 * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
 * the {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
 * or {@link #write(float[], int, int, int)} methods.
 * If the mode is {@link #MODE_STREAM}, you can optionally prime the
 * output buffer by writing up to bufferSizeInBytes (from constructor) before starting.
 * This priming will avoid an immediate underrun, but is not required.
 *
 * @throws IllegalStateException
 */
public void play()
throws IllegalStateException {
    if (mState != STATE_INITIALIZED) {
        throw new IllegalStateException("play() called on uninitialized AudioTrack.");
    }
    if (isRestricted()) {
        // app-ops restriction: play silently rather than fail
        setVolume(0);
    }
    synchronized(mPlayStateLock) {
        native_start();
        mPlayState = PLAYSTATE_PLAYING;
    }
}

// Returns true when playback should be silenced for this app per the app-ops service;
// tracks flagged to bypass interruption policy are never restricted.
private boolean isRestricted() {
    if ((mAttributes.getFlags() & AudioAttributes.FLAG_BYPASS_INTERRUPTION_POLICY) != 0) {
        return false;
    }
    try {
        final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType);
        final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage,
                Process.myUid(), ActivityThread.currentPackageName());
        return mode != AppOpsManager.MODE_ALLOWED;
    } catch (RemoteException e) {
        // if the app-ops service is unreachable, do not restrict
        return false;
    }
}

/**
 * Stops playing the audio data.
 * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
 * after the last buffer that was written has been played. For an immediate stop, use
 * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
 * back yet.
 * @throws IllegalStateException
 */
public void stop()
throws IllegalStateException {
    if (mState != STATE_INITIALIZED) {
        throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
    }

    // stop playing
    synchronized(mPlayStateLock) {
        native_stop();
        mPlayState = PLAYSTATE_STOPPED;
        // drop any pending A/V sync header state
        mAvSyncHeader = null;
        mAvSyncBytesRemaining = 0;
    }
}

/**
 * Pauses the playback of the audio data. Data that has not been played
 * back will not be discarded. Subsequent calls to {@link #play} will play
 * this data back. See {@link #flush()} to discard this data.
 *
 * @throws IllegalStateException
 */
public void pause()
throws IllegalStateException {
    if (mState != STATE_INITIALIZED) {
        throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
    }
    //logd("pause()");

    // pause playback
    synchronized(mPlayStateLock) {
        native_pause();
        mPlayState = PLAYSTATE_PAUSED;
    }
}


//---------------------------------------------------------
// Audio data supply
//--------------------

/**
 * Flushes the audio data currently queued for playback. Any data that has
 * been written but not yet presented will be discarded.  No-op if not stopped or paused,
 * or if the track's creation mode is not {@link #MODE_STREAM}.
 * <BR> Note that although data written but not yet presented is discarded, there is no
 * guarantee that all of the buffer space formerly used by that data
 * is available for a subsequent write.
 * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
 * less than or equal to the total buffer size
 * may return a short actual transfer count.
 */
public void flush() {
    if (mState == STATE_INITIALIZED) {
        // flush the data in native layer
        native_flush();
        // drop any pending A/V sync header state
        mAvSyncHeader = null;
        mAvSyncBytesRemaining = 0;
    }

}

/**
 * Writes the audio data to the audio sink for playback (streaming mode),
 * or copies audio data for later playback (static buffer mode).
 * The format specified in the AudioTrack constructor should be
 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
 * In streaming mode, will block until all data has been written to the audio sink.
 * In static buffer mode, copies the data to the buffer starting at offset 0.
 * Note that the actual playback of this data might occur after this function
 * returns. This function is thread safe with respect to {@link #stop} calls,
 * in which case all of the specified data might not be written to the audio sink.
 *
 * @param audioData the array that holds the data to play.
 * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
 *    starts.
 * @param sizeInBytes the number of bytes to read in audioData after the offset.
 * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
 *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
 *    the parameters don't resolve to valid data and indexes, or
 *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
 *    needs to be recreated.
 */
public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
    // convenience overload: blocking write
    return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
}

/**
 * Writes the audio data to the audio sink for playback (streaming mode),
 * or copies audio data for later playback (static buffer mode).
 * The format specified in the AudioTrack constructor should be
 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
 * In streaming mode, will block until all data has been written to the audio sink.
 * In static buffer mode, copies the data to the buffer starting at offset 0.
 * Note that the actual playback of this data might occur after this function
 * returns. This function is thread safe with respect to {@link #stop} calls,
 * in which case all of the specified data might not be written to the audio sink.
 *
 * @param audioData the array that holds the data to play.
 * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
 *    starts.
 * @param sizeInBytes the number of bytes to read in audioData after the offset.
 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
 *     effect in static mode.
 *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
 *         to the audio sink.
 *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
 *     queuing as much audio data for playback as possible without blocking.
 * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
 *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
 *    the parameters don't resolve to valid data and indexes, or
 *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
 *    needs to be recreated.
 */
public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
        @WriteMode int writeMode) {

    // byte[] writes are not valid for float-encoded tracks
    if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
        return ERROR_INVALID_OPERATION;
    }

    if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
        Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
        return ERROR_BAD_VALUE;
    }

    if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
            || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
            || (offsetInBytes + sizeInBytes > audioData.length)) {
        return ERROR_BAD_VALUE;
    }

    int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
            writeMode == WRITE_BLOCKING);

    if ((mDataLoadMode == MODE_STATIC)
            && (mState == STATE_NO_STATIC_DATA)
            && (ret > 0)) {
        // benign race with respect to other APIs that read mState
        mState = STATE_INITIALIZED;
    }

    return ret;
}

/**
 * Writes the audio data to the audio sink for playback (streaming mode),
 * or copies audio data for later playback (static buffer mode).
 * The format specified in the AudioTrack constructor should be
 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
 * In streaming mode, will block until all data has been written to the audio sink.
 * In static buffer mode, copies the data to the buffer starting at offset 0.
 * Note that the actual playback of this data might occur after this function
 * returns. This function is thread safe with respect to {@link #stop} calls,
 * in which case all of the specified data might not be written to the audio sink.
 *
 * @param audioData the array that holds the data to play.
 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
 *     starts.
1662 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1663 * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION} 1664 * if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1665 * the parameters don't resolve to valid data and indexes, or 1666 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1667 * needs to be recreated. 1668 */ 1669 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) { 1670 return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING); 1671 } 1672 1673 /** 1674 * Writes the audio data to the audio sink for playback (streaming mode), 1675 * or copies audio data for later playback (static buffer mode). 1676 * The format specified in the AudioTrack constructor should be 1677 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1678 * In streaming mode, will block until all data has been written to the audio sink. 1679 * In static buffer mode, copies the data to the buffer starting at offset 0. 1680 * Note that the actual playback of this data might occur after this function 1681 * returns. This function is thread safe with respect to {@link #stop} calls, 1682 * in which case all of the specified data might not be written to the audio sink. 1683 * 1684 * @param audioData the array that holds the data to play. 1685 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 1686 * starts. 1687 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1688 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1689 * effect in static mode. 1690 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1691 * to the audio sink. 
1692 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1693 * queuing as much audio data for playback as possible without blocking. 1694 * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION} 1695 * if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1696 * the parameters don't resolve to valid data and indexes, or 1697 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1698 * needs to be recreated. 1699 */ 1700 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts, 1701 @WriteMode int writeMode) { 1702 1703 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1704 return ERROR_INVALID_OPERATION; 1705 } 1706 1707 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1708 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1709 return ERROR_BAD_VALUE; 1710 } 1711 1712 if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0) 1713 || (offsetInShorts + sizeInShorts < 0) // detect integer overflow 1714 || (offsetInShorts + sizeInShorts > audioData.length)) { 1715 return ERROR_BAD_VALUE; 1716 } 1717 1718 int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat, 1719 writeMode == WRITE_BLOCKING); 1720 1721 if ((mDataLoadMode == MODE_STATIC) 1722 && (mState == STATE_NO_STATIC_DATA) 1723 && (ret > 0)) { 1724 // benign race with respect to other APIs that read mState 1725 mState = STATE_INITIALIZED; 1726 } 1727 1728 return ret; 1729 } 1730 1731 /** 1732 * Writes the audio data to the audio sink for playback (streaming mode), 1733 * or copies audio data for later playback (static buffer mode). 1734 * The format specified in the AudioTrack constructor should be 1735 * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array. 
1736 * In static buffer mode, copies the data to the buffer starting at offset 0, 1737 * and the write mode is ignored. 1738 * In streaming mode, the blocking behavior will depend on the write mode. 1739 * <p> 1740 * Note that the actual playback of this data might occur after this function 1741 * returns. This function is thread safe with respect to {@link #stop} calls, 1742 * in which case all of the specified data might not be written to the audio sink. 1743 * <p> 1744 * @param audioData the array that holds the data to play. 1745 * The implementation does not clip for sample values within the nominal range 1746 * [-1.0f, 1.0f], provided that all gains in the audio pipeline are 1747 * less than or equal to unity (1.0f), and in the absence of post-processing effects 1748 * that could add energy, such as reverb. For the convenience of applications 1749 * that compute samples using filters with non-unity gain, 1750 * sample values +3 dB beyond the nominal range are permitted. 1751 * However such values may eventually be limited or clipped, depending on various gains 1752 * and later processing in the audio path. Therefore applications are encouraged 1753 * to provide samples values within the nominal range. 1754 * @param offsetInFloats the offset, expressed as a number of floats, 1755 * in audioData where the data to play starts. 1756 * @param sizeInFloats the number of floats to read in audioData after the offset. 1757 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1758 * effect in static mode. 1759 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1760 * to the audio sink. 1761 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1762 * queuing as much audio data for playback as possible without blocking. 
1763 * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION} 1764 * if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1765 * the parameters don't resolve to valid data and indexes, or 1766 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1767 * needs to be recreated. 1768 */ 1769 public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats, 1770 @WriteMode int writeMode) { 1771 1772 if (mState == STATE_UNINITIALIZED) { 1773 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 1774 return ERROR_INVALID_OPERATION; 1775 } 1776 1777 if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) { 1778 Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT"); 1779 return ERROR_INVALID_OPERATION; 1780 } 1781 1782 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1783 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1784 return ERROR_BAD_VALUE; 1785 } 1786 1787 if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0) 1788 || (offsetInFloats + sizeInFloats < 0) // detect integer overflow 1789 || (offsetInFloats + sizeInFloats > audioData.length)) { 1790 Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size"); 1791 return ERROR_BAD_VALUE; 1792 } 1793 1794 int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat, 1795 writeMode == WRITE_BLOCKING); 1796 1797 if ((mDataLoadMode == MODE_STATIC) 1798 && (mState == STATE_NO_STATIC_DATA) 1799 && (ret > 0)) { 1800 // benign race with respect to other APIs that read mState 1801 mState = STATE_INITIALIZED; 1802 } 1803 1804 return ret; 1805 } 1806 1807 1808 /** 1809 * Writes the audio data to the audio sink for playback (streaming mode), 1810 * or copies audio data for later playback (static buffer mode). 
1811 * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write 1812 * mode is ignored. 1813 * In streaming mode, the blocking behavior will depend on the write mode. 1814 * @param audioData the buffer that holds the data to play, starting at the position reported 1815 * by <code>audioData.position()</code>. 1816 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 1817 * have been advanced to reflect the amount of data that was successfully written to 1818 * the AudioTrack. 1819 * @param sizeInBytes number of bytes to write. 1820 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 1821 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1822 * effect in static mode. 1823 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1824 * to the audio sink. 1825 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1826 * queuing as much audio data for playback as possible without blocking. 1827 * @return 0 or a positive number of bytes that were written, or 1828 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or 1829 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1830 * needs to be recreated. 
1831 */ 1832 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 1833 @WriteMode int writeMode) { 1834 1835 if (mState == STATE_UNINITIALIZED) { 1836 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 1837 return ERROR_INVALID_OPERATION; 1838 } 1839 1840 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1841 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1842 return ERROR_BAD_VALUE; 1843 } 1844 1845 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 1846 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 1847 return ERROR_BAD_VALUE; 1848 } 1849 1850 int ret = 0; 1851 if (audioData.isDirect()) { 1852 ret = native_write_native_bytes(audioData, 1853 audioData.position(), sizeInBytes, mAudioFormat, 1854 writeMode == WRITE_BLOCKING); 1855 } else { 1856 ret = native_write_byte(NioUtils.unsafeArray(audioData), 1857 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 1858 sizeInBytes, mAudioFormat, 1859 writeMode == WRITE_BLOCKING); 1860 } 1861 1862 if ((mDataLoadMode == MODE_STATIC) 1863 && (mState == STATE_NO_STATIC_DATA) 1864 && (ret > 0)) { 1865 // benign race with respect to other APIs that read mState 1866 mState = STATE_INITIALIZED; 1867 } 1868 1869 if (ret > 0) { 1870 audioData.position(audioData.position() + ret); 1871 } 1872 1873 return ret; 1874 } 1875 1876 /** 1877 * Writes the audio data to the audio sink for playback (streaming mode) on a HW_AV_SYNC track. 1878 * In streaming mode, the blocking behavior will depend on the write mode. 1879 * @param audioData the buffer that holds the data to play, starting at the position reported 1880 * by <code>audioData.position()</code>. 1881 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 1882 * have been advanced to reflect the amount of data that was successfully written to 1883 * the AudioTrack. 
1884 * @param sizeInBytes number of bytes to write. 1885 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 1886 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. 1887 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1888 * to the audio sink. 1889 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1890 * queuing as much audio data for playback as possible without blocking. 1891 * @param timestamp The timestamp of the first decodable audio frame in the provided audioData. 1892 * @return 0 or a positive number of bytes that were written, or 1893 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or 1894 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1895 * needs to be recreated. 1896 */ 1897 public int write(ByteBuffer audioData, int sizeInBytes, 1898 @WriteMode int writeMode, long timestamp) { 1899 1900 if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) { 1901 Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. 
Ignoring pts..."); 1902 return write(audioData, sizeInBytes, writeMode); 1903 } 1904 1905 if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 1906 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 1907 return ERROR_BAD_VALUE; 1908 } 1909 1910 // create timestamp header if none exists 1911 if (mAvSyncHeader == null) { 1912 mAvSyncHeader = ByteBuffer.allocate(16); 1913 mAvSyncHeader.order(ByteOrder.BIG_ENDIAN); 1914 mAvSyncHeader.putInt(0x55550001); 1915 mAvSyncHeader.putInt(sizeInBytes); 1916 mAvSyncHeader.putLong(timestamp); 1917 mAvSyncHeader.position(0); 1918 mAvSyncBytesRemaining = sizeInBytes; 1919 } 1920 1921 // write timestamp header if not completely written already 1922 int ret = 0; 1923 if (mAvSyncHeader.remaining() != 0) { 1924 ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode); 1925 if (ret < 0) { 1926 Log.e(TAG, "AudioTrack.write() could not write timestamp header!"); 1927 mAvSyncHeader = null; 1928 mAvSyncBytesRemaining = 0; 1929 return ret; 1930 } 1931 if (mAvSyncHeader.remaining() > 0) { 1932 Log.v(TAG, "AudioTrack.write() partial timestamp header written."); 1933 return 0; 1934 } 1935 } 1936 1937 // write audio data 1938 int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes); 1939 ret = write(audioData, sizeToWrite, writeMode); 1940 if (ret < 0) { 1941 Log.e(TAG, "AudioTrack.write() could not write audio data!"); 1942 mAvSyncHeader = null; 1943 mAvSyncBytesRemaining = 0; 1944 return ret; 1945 } 1946 1947 mAvSyncBytesRemaining -= ret; 1948 if (mAvSyncBytesRemaining == 0) { 1949 mAvSyncHeader = null; 1950 } 1951 1952 return ret; 1953 } 1954 1955 1956 /** 1957 * Sets the playback head position within the static buffer to zero, 1958 * that is it rewinds to start of static buffer. 1959 * The track must be stopped or paused, and 1960 * the track's creation mode must be {@link #MODE_STATIC}. 
1961 * <p> 1962 * As of {@link android.os.Build.VERSION_CODES#MNC}, also resets the value returned by 1963 * {@link #getPlaybackHeadPosition()} to zero. 1964 * For earlier API levels, the reset behavior is unspecified. 1965 * <p> 1966 * Use {@link #setPlaybackHeadPosition(int)} with a zero position 1967 * if the reset of <code>getPlaybackHeadPosition()</code> is not needed. 1968 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1969 * {@link #ERROR_INVALID_OPERATION} 1970 */ 1971 public int reloadStaticData() { 1972 if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) { 1973 return ERROR_INVALID_OPERATION; 1974 } 1975 return native_reload_static(); 1976 } 1977 1978 //-------------------------------------------------------------------------- 1979 // Audio effects management 1980 //-------------------- 1981 1982 /** 1983 * Attaches an auxiliary effect to the audio track. A typical auxiliary 1984 * effect is a reverberation effect which can be applied on any sound source 1985 * that directs a certain amount of its energy to this effect. This amount 1986 * is defined by setAuxEffectSendLevel(). 1987 * {@see #setAuxEffectSendLevel(float)}. 1988 * <p>After creating an auxiliary effect (e.g. 1989 * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with 1990 * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling 1991 * this method to attach the audio track to the effect. 1992 * <p>To detach the effect from the audio track, call this method with a 1993 * null effect id. 
1994 * 1995 * @param effectId system wide unique id of the effect to attach 1996 * @return error code or success, see {@link #SUCCESS}, 1997 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE} 1998 */ 1999 public int attachAuxEffect(int effectId) { 2000 if (mState == STATE_UNINITIALIZED) { 2001 return ERROR_INVALID_OPERATION; 2002 } 2003 return native_attachAuxEffect(effectId); 2004 } 2005 2006 /** 2007 * Sets the send level of the audio track to the attached auxiliary effect 2008 * {@link #attachAuxEffect(int)}. Effect levels 2009 * are clamped to the closed interval [0.0, max] where 2010 * max is the value of {@link #getMaxVolume}. 2011 * A value of 0.0 results in no effect, and a value of 1.0 is full send. 2012 * <p>By default the send level is 0.0f, so even if an effect is attached to the player 2013 * this method must be called for the effect to be applied. 2014 * <p>Note that the passed level value is a linear scalar. UI controls should be scaled 2015 * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB, 2016 * so an appropriate conversion from linear UI input x to level is: 2017 * x == 0 -> level = 0 2018 * 0 < x <= R -> level = 10^(72*(x-R)/20/R) 2019 * 2020 * @param level linear send level 2021 * @return error code or success, see {@link #SUCCESS}, 2022 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR} 2023 */ 2024 public int setAuxEffectSendLevel(float level) { 2025 if (isRestricted()) { 2026 return SUCCESS; 2027 } 2028 if (mState == STATE_UNINITIALIZED) { 2029 return ERROR_INVALID_OPERATION; 2030 } 2031 level = clampGainOrLevel(level); 2032 int err = native_setAuxEffectSendLevel(level); 2033 return err == 0 ? 
SUCCESS : ERROR; 2034 } 2035 2036 //-------------------------------------------------------------------------- 2037 // Explicit Routing 2038 //-------------------- 2039 private AudioDeviceInfo mPreferredDevice = null; 2040 2041 /** 2042 * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route 2043 * the output from this AudioTrack. 2044 * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink. 2045 * If deviceInfo is null, default routing is restored. 2046 * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and 2047 * does not correspond to a valid audio output device. 2048 */ 2049 public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) { 2050 // Do some validation.... 2051 if (deviceInfo != null && !deviceInfo.isSink()) { 2052 return false; 2053 } 2054 int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0; 2055 boolean status = native_setOutputDevice(preferredDeviceId); 2056 if (status == true) { 2057 synchronized (this) { 2058 mPreferredDevice = deviceInfo; 2059 } 2060 } 2061 return status; 2062 } 2063 2064 /** 2065 * Returns the selected output specified by {@link #setPreferredDevice}. Note that this 2066 * is not guaranteed to correspond to the actual device being used for playback. 2067 */ 2068 public AudioDeviceInfo getPreferredDevice() { 2069 synchronized (this) { 2070 return mPreferredDevice; 2071 } 2072 } 2073 2074 //-------------------------------------------------------------------------- 2075 // (Re)Routing Info 2076 //-------------------- 2077 public interface OnRoutingChangedListener { 2078 /** 2079 * Called when the routing of an AudioTrack changes from either and explicit or 2080 * policy rerouting. 2081 */ 2082 public void onRoutingChanged(AudioTrack audioTrack); 2083 } 2084 2085 /** 2086 * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack. 
2087 */ 2088 public AudioDeviceInfo getRoutedDevice() { 2089 int deviceId = native_getRoutedDeviceId(); 2090 if (deviceId == 0) { 2091 return null; 2092 } 2093 AudioDeviceInfo[] devices = 2094 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS); 2095 for (int i = 0; i < devices.length; i++) { 2096 if (devices[i].getId() == deviceId) { 2097 return devices[i]; 2098 } 2099 } 2100 return null; 2101 } 2102 2103 /** 2104 * The message sent to apps when the routing of this AudioTrack changes if they provide 2105 * a {#link Handler} object to addOnRoutingChangedListener(). 2106 */ 2107 private ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate> 2108 mRoutingChangeListeners = 2109 new ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate>(); 2110 2111 /** 2112 * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes 2113 * on this AudioTrack. 2114 */ 2115 public void addOnRoutingChangedListener(OnRoutingChangedListener listener, 2116 android.os.Handler handler) { 2117 if (listener != null && !mRoutingChangeListeners.containsKey(listener)) { 2118 synchronized (mRoutingChangeListeners) { 2119 if (mRoutingChangeListeners.size() == 0) { 2120 native_enableDeviceCallback(); 2121 } 2122 mRoutingChangeListeners.put( 2123 listener, new NativeRoutingEventHandlerDelegate(this, listener, handler)); 2124 } 2125 } 2126 } 2127 2128 /** 2129 * Removes an {@link OnRoutingChangedListener} which has been previously added 2130 * to receive notifications of changes to the set of connected audio devices. 
2131 */ 2132 public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) { 2133 synchronized (mRoutingChangeListeners) { 2134 if (mRoutingChangeListeners.containsKey(listener)) { 2135 mRoutingChangeListeners.remove(listener); 2136 } 2137 if (mRoutingChangeListeners.size() == 0) { 2138 native_disableDeviceCallback(); 2139 } 2140 } 2141 } 2142 2143 /** 2144 * Sends device list change notification to all listeners. 2145 */ 2146 private void broadcastRoutingChange() { 2147 Collection<NativeRoutingEventHandlerDelegate> values; 2148 synchronized (mRoutingChangeListeners) { 2149 values = mRoutingChangeListeners.values(); 2150 } 2151 AudioManager.resetAudioPortGeneration(); 2152 for(NativeRoutingEventHandlerDelegate delegate : values) { 2153 Handler handler = delegate.getHandler(); 2154 if (handler != null) { 2155 handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE); 2156 } 2157 } 2158 } 2159 2160 //--------------------------------------------------------- 2161 // Interface definitions 2162 //-------------------- 2163 /** 2164 * Interface definition for a callback to be invoked when the playback head position of 2165 * an AudioTrack has reached a notification marker or has increased by a certain period. 2166 */ 2167 public interface OnPlaybackPositionUpdateListener { 2168 /** 2169 * Called on the listener to notify it that the previously set marker has been reached 2170 * by the playback head. 2171 */ 2172 void onMarkerReached(AudioTrack track); 2173 2174 /** 2175 * Called on the listener to periodically notify it that the playback head has reached 2176 * a multiple of the notification period. 
         */
        void onPeriodicNotification(AudioTrack track);
    }

    //---------------------------------------------------------
    // Inner classes
    //--------------------
    /**
     * Helper class to handle the forwarding of native events to the appropriate listener
     * (potentially) handled in a different thread.
     * Position events (marker reached / periodic notification) are delivered on the
     * Looper of the supplied Handler, or, when none is given, on the Looper the
     * AudioTrack was created in (mInitializationLooper). If neither Looper exists,
     * getHandler() returns null and events are dropped by postEventFromNative().
     */
    private class NativePositionEventHandlerDelegate {
        private final Handler mHandler;

        NativePositionEventHandlerDelegate(final AudioTrack track,
                                   final OnPlaybackPositionUpdateListener listener,
                                   Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        // Defensive: track is captured from the constructor argument.
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                        case NATIVE_EVENT_MARKER:
                            if (listener != null) {
                                listener.onMarkerReached(track);
                            }
                            break;
                        case NATIVE_EVENT_NEW_POS:
                            if (listener != null) {
                                listener.onPeriodicNotification(track);
                            }
                            break;
                        default:
                            loge("Unknown native event type: " + msg.what);
                            break;
                        }
                    }
                };
            } else {
                // No usable Looper: events addressed to this delegate are dropped.
                mHandler = null;
            }
        }

        // May return null when no Looper was available at construction time.
        Handler getHandler() {
            return mHandler;
        }
    }

    /**
     * Helper class to handle the forwarding of native events to the appropriate listener
     * (potentially) handled in a different thread.
     * Same Looper-selection logic as NativePositionEventHandlerDelegate, but forwards
     * routing-change events to an OnRoutingChangedListener.
     */
    private class NativeRoutingEventHandlerDelegate {
        private final Handler mHandler;

        NativeRoutingEventHandlerDelegate(final AudioTrack track,
                                   final OnRoutingChangedListener listener,
                                   Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        // Defensive: track is captured from the constructor argument.
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                        case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE:
                            if (listener != null) {
                                listener.onRoutingChanged(track);
                            }
                            break;
                        default:
                            loge("Unknown native event type: " + msg.what);
                            break;
                        }
                    }
                };
            } else {
                // No usable Looper: events addressed to this delegate are dropped.
                mHandler = null;
            }
        }

        // May return null when no Looper was available at construction time.
        Handler getHandler() {
            return mHandler;
        }
    }

    //---------------------------------------------------------
    // Java methods called from the native side
    //--------------------
    // Entry point for all native callbacks. Routing-change events are fanned out to
    // all registered routing listeners via broadcastRoutingChange(); every other event
    // is forwarded to the single position-event delegate, if one is installed and has
    // a usable Handler.
    @SuppressWarnings("unused")
    private static void postEventFromNative(Object audiotrack_ref,
            int what, int arg1, int arg2, Object obj) {
        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
        // The native layer holds only a weak reference; the track may be gone already.
        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
        if (track == null) {
            return;
        }

        if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
            track.broadcastRoutingChange();
            return;
        }
        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
        if (delegate != null) {
            Handler handler = delegate.getHandler();
            if (handler != null) {
                Message m = handler.obtainMessage(what, arg1, arg2, obj);
                handler.sendMessage(m);
            }
        }
    }


    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    // --- lifecycle ---
    private native final void native_finalize();

    private native final void native_release();

    // --- transport control (see play()/stop()/pause()/flush() wrappers above) ---
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // --- data supply; each returns a count written or a negative error code ---
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    // --- volume / playback parameters ---
    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // --- position markers and notification period ---
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // --- static queries (no track instance required) ---
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // --- auxiliary effects ---
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // --- explicit routing and device callbacks ---
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    //---------------------------------------------------------
    // Utility methods
    //------------------

    // Debug-level log helper using the class TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Error-level log helper using the class TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
}