// AudioTrack.java revision 2bf39d8e9f9bdcbe548f8bd8acb89def382b467d
1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17package android.media; 18 19import java.lang.annotation.Retention; 20import java.lang.annotation.RetentionPolicy; 21import java.lang.ref.WeakReference; 22import java.lang.Math; 23import java.nio.ByteBuffer; 24import java.nio.ByteOrder; 25import java.nio.NioUtils; 26import java.util.Collection; 27 28import android.annotation.IntDef; 29import android.annotation.NonNull; 30import android.app.ActivityThread; 31import android.app.AppOpsManager; 32import android.content.Context; 33import android.os.Handler; 34import android.os.IBinder; 35import android.os.Looper; 36import android.os.Message; 37import android.os.Process; 38import android.os.RemoteException; 39import android.os.ServiceManager; 40import android.util.ArrayMap; 41import android.util.Log; 42 43import com.android.internal.app.IAppOpsService; 44 45/** 46 * The AudioTrack class manages and plays a single audio resource for Java applications. 47 * It allows streaming of PCM audio buffers to the audio sink for playback. This is 48 * achieved by "pushing" the data to the AudioTrack object using one of the 49 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)}, 50 * and {@link #write(float[], int, int, int)} methods. 
51 * 52 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br> 53 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using 54 * one of the {@code write()} methods. These are blocking and return when the data has been 55 * transferred from the Java layer to the native layer and queued for playback. The streaming 56 * mode is most useful when playing blocks of audio data that for instance are: 57 * 58 * <ul> 59 * <li>too big to fit in memory because of the duration of the sound to play,</li> 60 * <li>too big to fit in memory because of the characteristics of the audio data 61 * (high sampling rate, bits per sample ...)</li> 62 * <li>received or generated while previously queued audio is playing.</li> 63 * </ul> 64 * 65 * The static mode should be chosen when dealing with short sounds that fit in memory and 66 * that need to be played with the smallest latency possible. The static mode will 67 * therefore be preferred for UI and game sounds that are played often, and with the 68 * smallest overhead possible. 69 * 70 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer. 71 * The size of this buffer, specified during the construction, determines how long an AudioTrack 72 * can play before running out of data.<br> 73 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can 74 * be played from it.<br> 75 * For the streaming mode, data will be written to the audio sink in chunks of 76 * sizes less than or equal to the total buffer size. 77 * 78 * AudioTrack is not final and thus permits subclasses, but such use is not recommended. 79 */ 80public class AudioTrack implements AudioRouting 81{ 82 //--------------------------------------------------------- 83 // Constants 84 //-------------------- 85 /** Minimum value for a linear gain or auxiliary effect level. 86 * This value must be exactly equal to 0.0f; do not change it. 
87 */ 88 private static final float GAIN_MIN = 0.0f; 89 /** Maximum value for a linear gain or auxiliary effect level. 90 * This value must be greater than or equal to 1.0f. 91 */ 92 private static final float GAIN_MAX = 1.0f; 93 94 /** Minimum value for sample rate */ 95 private static final int SAMPLE_RATE_HZ_MIN = 4000; 96 /** Maximum value for sample rate */ 97 private static final int SAMPLE_RATE_HZ_MAX = 192000; 98 99 /** Maximum value for AudioTrack channel count 100 * @hide public for MediaCode only, do not un-hide or change to a numeric literal 101 */ 102 public static final int CHANNEL_COUNT_MAX = native_get_FCC_8(); 103 104 /** indicates AudioTrack state is stopped */ 105 public static final int PLAYSTATE_STOPPED = 1; // matches SL_PLAYSTATE_STOPPED 106 /** indicates AudioTrack state is paused */ 107 public static final int PLAYSTATE_PAUSED = 2; // matches SL_PLAYSTATE_PAUSED 108 /** indicates AudioTrack state is playing */ 109 public static final int PLAYSTATE_PLAYING = 3; // matches SL_PLAYSTATE_PLAYING 110 111 // keep these values in sync with android_media_AudioTrack.cpp 112 /** 113 * Creation mode where audio data is transferred from Java to the native layer 114 * only once before the audio starts playing. 115 */ 116 public static final int MODE_STATIC = 0; 117 /** 118 * Creation mode where audio data is streamed from Java to the native layer 119 * as the audio is playing. 120 */ 121 public static final int MODE_STREAM = 1; 122 123 /** @hide */ 124 @IntDef({ 125 MODE_STATIC, 126 MODE_STREAM 127 }) 128 @Retention(RetentionPolicy.SOURCE) 129 public @interface TransferMode {} 130 131 /** 132 * State of an AudioTrack that was not successfully initialized upon creation. 133 */ 134 public static final int STATE_UNINITIALIZED = 0; 135 /** 136 * State of an AudioTrack that is ready to be used. 
137 */ 138 public static final int STATE_INITIALIZED = 1; 139 /** 140 * State of a successfully initialized AudioTrack that uses static data, 141 * but that hasn't received that data yet. 142 */ 143 public static final int STATE_NO_STATIC_DATA = 2; 144 145 /** 146 * Denotes a successful operation. 147 */ 148 public static final int SUCCESS = AudioSystem.SUCCESS; 149 /** 150 * Denotes a generic operation failure. 151 */ 152 public static final int ERROR = AudioSystem.ERROR; 153 /** 154 * Denotes a failure due to the use of an invalid value. 155 */ 156 public static final int ERROR_BAD_VALUE = AudioSystem.BAD_VALUE; 157 /** 158 * Denotes a failure due to the improper use of a method. 159 */ 160 public static final int ERROR_INVALID_OPERATION = AudioSystem.INVALID_OPERATION; 161 /** 162 * An error code indicating that the object reporting it is no longer valid and needs to 163 * be recreated. 164 * @hide 165 */ 166 public static final int ERROR_DEAD_OBJECT = AudioSystem.DEAD_OBJECT; 167 /** 168 * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state, 169 * or immediately after start/ACTIVE. 170 * @hide 171 */ 172 public static final int ERROR_WOULD_BLOCK = AudioSystem.WOULD_BLOCK; 173 174 // Error codes: 175 // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp 176 private static final int ERROR_NATIVESETUP_AUDIOSYSTEM = -16; 177 private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK = -17; 178 private static final int ERROR_NATIVESETUP_INVALIDFORMAT = -18; 179 private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE = -19; 180 private static final int ERROR_NATIVESETUP_NATIVEINITFAILED = -20; 181 182 // Events: 183 // to keep in sync with frameworks/av/include/media/AudioTrack.h 184 /** 185 * Event id denotes when playback head has reached a previously set marker. 
186 */ 187 private static final int NATIVE_EVENT_MARKER = 3; 188 /** 189 * Event id denotes when previously set update period has elapsed during playback. 190 */ 191 private static final int NATIVE_EVENT_NEW_POS = 4; 192 193 private final static String TAG = "android.media.AudioTrack"; 194 195 196 /** @hide */ 197 @IntDef({ 198 WRITE_BLOCKING, 199 WRITE_NON_BLOCKING 200 }) 201 @Retention(RetentionPolicy.SOURCE) 202 public @interface WriteMode {} 203 204 /** 205 * The write mode indicating the write operation will block until all data has been written, 206 * to be used as the actual value of the writeMode parameter in 207 * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)}, 208 * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and 209 * {@link #write(ByteBuffer, int, int, long)}. 210 */ 211 public final static int WRITE_BLOCKING = 0; 212 213 /** 214 * The write mode indicating the write operation will return immediately after 215 * queuing as much audio data for playback as possible without blocking, 216 * to be used as the actual value of the writeMode parameter in 217 * {@link #write(ByteBuffer, int, int)}, {@link #write(short[], int, int, int)}, 218 * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and 219 * {@link #write(ByteBuffer, int, int, long)}. 220 */ 221 public final static int WRITE_NON_BLOCKING = 1; 222 223 //-------------------------------------------------------------------------- 224 // Member variables 225 //-------------------- 226 /** 227 * Indicates the state of the AudioTrack instance. 228 * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA. 229 */ 230 private int mState = STATE_UNINITIALIZED; 231 /** 232 * Indicates the play state of the AudioTrack instance. 233 * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING. 
234 */ 235 private int mPlayState = PLAYSTATE_STOPPED; 236 /** 237 * Lock to ensure mPlayState updates reflect the actual state of the object. 238 */ 239 private final Object mPlayStateLock = new Object(); 240 /** 241 * Sizes of the native audio buffer. 242 * These values are set during construction and can be stale. 243 * To obtain the current native audio buffer frame count use {@link #getBufferSizeInFrames()}. 244 */ 245 private int mNativeBufferSizeInBytes = 0; 246 private int mNativeBufferSizeInFrames = 0; 247 /** 248 * Handler for events coming from the native code. 249 */ 250 private NativePositionEventHandlerDelegate mEventHandlerDelegate; 251 /** 252 * Looper associated with the thread that creates the AudioTrack instance. 253 */ 254 private final Looper mInitializationLooper; 255 /** 256 * The audio data source sampling rate in Hz. 257 */ 258 private int mSampleRate; // initialized by all constructors via audioParamCheck() 259 /** 260 * The number of audio output channels (1 is mono, 2 is stereo, etc.). 261 */ 262 private int mChannelCount = 1; 263 /** 264 * The audio channel mask used for calling native AudioTrack 265 */ 266 private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO; 267 268 /** 269 * The type of the audio stream to play. See 270 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 271 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 272 * {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and 273 * {@link AudioManager#STREAM_DTMF}. 274 */ 275 private int mStreamType = AudioManager.STREAM_MUSIC; 276 277 private final AudioAttributes mAttributes; 278 /** 279 * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM. 280 */ 281 private int mDataLoadMode = MODE_STREAM; 282 /** 283 * The current channel position mask, as specified on AudioTrack creation. 284 * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}. 
285 * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified. 286 */ 287 private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO; 288 /** 289 * The channel index mask if specified, otherwise 0. 290 */ 291 private int mChannelIndexMask = 0; 292 /** 293 * The encoding of the audio samples. 294 * @see AudioFormat#ENCODING_PCM_8BIT 295 * @see AudioFormat#ENCODING_PCM_16BIT 296 * @see AudioFormat#ENCODING_PCM_FLOAT 297 */ 298 private int mAudioFormat; // initialized by all constructors via audioParamCheck() 299 /** 300 * Audio session ID 301 */ 302 private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE; 303 /** 304 * Reference to the app-ops service. 305 */ 306 private final IAppOpsService mAppOps; 307 /** 308 * HW_AV_SYNC track AV Sync Header 309 */ 310 private ByteBuffer mAvSyncHeader = null; 311 /** 312 * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header 313 */ 314 private int mAvSyncBytesRemaining = 0; 315 316 //-------------------------------- 317 // Used exclusively by native code 318 //-------------------- 319 /** 320 * @hide 321 * Accessed by native methods: provides access to C++ AudioTrack object. 322 */ 323 @SuppressWarnings("unused") 324 protected long mNativeTrackInJavaObj; 325 /** 326 * Accessed by native methods: provides access to the JNI data (i.e. resources used by 327 * the native AudioTrack object, but not stored in it). 328 */ 329 @SuppressWarnings("unused") 330 private long mJniData; 331 332 333 //-------------------------------------------------------------------------- 334 // Constructor, Finalize 335 //-------------------- 336 /** 337 * Class constructor. 338 * @param streamType the type of the audio stream. See 339 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 340 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 341 * {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}. 
342 * @param sampleRateInHz the initial source sample rate expressed in Hz. 343 * @param channelConfig describes the configuration of the audio channels. 344 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 345 * {@link AudioFormat#CHANNEL_OUT_STEREO} 346 * @param audioFormat the format in which the audio data is represented. 347 * See {@link AudioFormat#ENCODING_PCM_16BIT}, 348 * {@link AudioFormat#ENCODING_PCM_8BIT}, 349 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 350 * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is 351 * read from for playback. This should be a multiple of the frame size in bytes. 352 * <p> If the track's creation mode is {@link #MODE_STATIC}, 353 * this is the maximum length sample, or audio clip, that can be played by this instance. 354 * <p> If the track's creation mode is {@link #MODE_STREAM}, 355 * this should be the desired buffer size 356 * for the <code>AudioTrack</code> to satisfy the application's 357 * natural latency requirements. 358 * If <code>bufferSizeInBytes</code> is less than the 359 * minimum buffer size for the output sink, it is automatically increased to the minimum 360 * buffer size. 361 * The method {@link #getBufferSizeInFrames()} returns the 362 * actual size in frames of the native buffer created, which 363 * determines the frequency to write 364 * to the streaming <code>AudioTrack</code> to avoid underrun. 365 * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM} 366 * @throws java.lang.IllegalArgumentException 367 */ 368 public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, 369 int bufferSizeInBytes, int mode) 370 throws IllegalArgumentException { 371 this(streamType, sampleRateInHz, channelConfig, audioFormat, 372 bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE); 373 } 374 375 /** 376 * Class constructor with audio session. 
Use this constructor when the AudioTrack must be 377 * attached to a particular audio session. The primary use of the audio session ID is to 378 * associate audio effects to a particular instance of AudioTrack: if an audio session ID 379 * is provided when creating an AudioEffect, this effect will be applied only to audio tracks 380 * and media players in the same session and not to the output mix. 381 * When an AudioTrack is created without specifying a session, it will create its own session 382 * which can be retrieved by calling the {@link #getAudioSessionId()} method. 383 * If a non-zero session ID is provided, this AudioTrack will share effects attached to this 384 * session 385 * with all other media players or audio tracks in the same session, otherwise a new session 386 * will be created for this track if none is supplied. 387 * @param streamType the type of the audio stream. See 388 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 389 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 390 * {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}. 391 * @param sampleRateInHz the initial source sample rate expressed in Hz. 392 * @param channelConfig describes the configuration of the audio channels. 393 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 394 * {@link AudioFormat#CHANNEL_OUT_STEREO} 395 * @param audioFormat the format in which the audio data is represented. 396 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 397 * {@link AudioFormat#ENCODING_PCM_8BIT}, 398 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 399 * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read 400 * from for playback. If using the AudioTrack in streaming mode, you can write data into 401 * this buffer in smaller chunks than this size. If using the AudioTrack in static mode, 402 * this is the maximum size of the sound that will be played for this instance. 
403 * See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size 404 * for the successful creation of an AudioTrack instance in streaming mode. Using values 405 * smaller than getMinBufferSize() will result in an initialization failure. 406 * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM} 407 * @param sessionId Id of audio session the AudioTrack must be attached to 408 * @throws java.lang.IllegalArgumentException 409 */ 410 public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, 411 int bufferSizeInBytes, int mode, int sessionId) 412 throws IllegalArgumentException { 413 // mState already == STATE_UNINITIALIZED 414 this((new AudioAttributes.Builder()) 415 .setLegacyStreamType(streamType) 416 .build(), 417 (new AudioFormat.Builder()) 418 .setChannelMask(channelConfig) 419 .setEncoding(audioFormat) 420 .setSampleRate(sampleRateInHz) 421 .build(), 422 bufferSizeInBytes, 423 mode, sessionId); 424 } 425 426 /** 427 * Class constructor with {@link AudioAttributes} and {@link AudioFormat}. 428 * @param attributes a non-null {@link AudioAttributes} instance. 429 * @param format a non-null {@link AudioFormat} instance describing the format of the data 430 * that will be played through this AudioTrack. See {@link AudioFormat.Builder} for 431 * configuring the audio format parameters such as encoding, channel mask and sample rate. 432 * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read 433 * from for playback. If using the AudioTrack in streaming mode, you can write data into 434 * this buffer in smaller chunks than this size. If using the AudioTrack in static mode, 435 * this is the maximum size of the sound that will be played for this instance. 436 * See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size 437 * for the successful creation of an AudioTrack instance in streaming mode. 
Using values 438 * smaller than getMinBufferSize() will result in an initialization failure. 439 * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}. 440 * @param sessionId ID of audio session the AudioTrack must be attached to, or 441 * {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction 442 * time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before 443 * construction. 444 * @throws IllegalArgumentException 445 */ 446 public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, 447 int mode, int sessionId) 448 throws IllegalArgumentException { 449 // mState already == STATE_UNINITIALIZED 450 451 if (attributes == null) { 452 throw new IllegalArgumentException("Illegal null AudioAttributes"); 453 } 454 if (format == null) { 455 throw new IllegalArgumentException("Illegal null AudioFormat"); 456 } 457 458 // remember which looper is associated with the AudioTrack instantiation 459 Looper looper; 460 if ((looper = Looper.myLooper()) == null) { 461 looper = Looper.getMainLooper(); 462 } 463 464 int rate = 0; 465 if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0) 466 { 467 rate = format.getSampleRate(); 468 } else { 469 rate = AudioSystem.getPrimaryOutputSamplingRate(); 470 if (rate <= 0) { 471 rate = 44100; 472 } 473 } 474 int channelIndexMask = 0; 475 if ((format.getPropertySetMask() 476 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) { 477 channelIndexMask = format.getChannelIndexMask(); 478 } 479 int channelMask = 0; 480 if ((format.getPropertySetMask() 481 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) { 482 channelMask = format.getChannelMask(); 483 } else if (channelIndexMask == 0) { // if no masks at all, use stereo 484 channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT 485 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT; 486 } 487 int encoding = 
AudioFormat.ENCODING_DEFAULT; 488 if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) { 489 encoding = format.getEncoding(); 490 } 491 audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode); 492 mStreamType = AudioSystem.STREAM_DEFAULT; 493 494 audioBuffSizeCheck(bufferSizeInBytes); 495 496 mInitializationLooper = looper; 497 IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE); 498 mAppOps = IAppOpsService.Stub.asInterface(b); 499 500 mAttributes = new AudioAttributes.Builder(attributes).build(); 501 502 if (sessionId < 0) { 503 throw new IllegalArgumentException("Invalid audio session ID: "+sessionId); 504 } 505 506 int[] session = new int[1]; 507 session[0] = sessionId; 508 // native initialization 509 int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes, 510 mSampleRate, mChannelMask, mChannelIndexMask, mAudioFormat, 511 mNativeBufferSizeInBytes, mDataLoadMode, session); 512 if (initResult != SUCCESS) { 513 loge("Error code "+initResult+" when initializing AudioTrack."); 514 return; // with mState == STATE_UNINITIALIZED 515 } 516 517 mSessionId = session[0]; 518 519 if (mDataLoadMode == MODE_STATIC) { 520 mState = STATE_NO_STATIC_DATA; 521 } else { 522 mState = STATE_INITIALIZED; 523 } 524 } 525 526 /** 527 * A constructor which explicitly connects a Native (C++) AudioTrack. For use by 528 * the AudioTrackRoutingProxy subclass. 529 * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack 530 * (associated with an OpenSL ES player). 531 */ 532 /*package*/ AudioTrack(long nativeTrackInJavaObj) { 533 mNativeTrackInJavaObj = nativeTrackInJavaObj; 534 535 // "final"s 536 mAttributes = null; 537 mAppOps = null; 538 539 // remember which looper is associated with the AudioTrack instantiation 540 Looper looper; 541 if ((looper = Looper.myLooper()) == null) { 542 looper = Looper.getMainLooper(); 543 } 544 mInitializationLooper = looper; 545 546 // other initialization... 
547 548 mState = STATE_INITIALIZED; 549 } 550 551 /** 552 * Builder class for {@link AudioTrack} objects. 553 * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio 554 * attributes and audio format parameters, you indicate which of those vary from the default 555 * behavior on the device. 556 * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat} 557 * parameters, to be used by a new <code>AudioTrack</code> instance: 558 * 559 * <pre class="prettyprint"> 560 * AudioTrack player = new AudioTrack.Builder() 561 * .setAudioAttributes(new AudioAttributes.Builder() 562 * .setUsage(AudioAttributes.USAGE_ALARM) 563 * .setContentType(CONTENT_TYPE_MUSIC) 564 * .build()) 565 * .setAudioFormat(new AudioFormat.Builder() 566 * .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 567 * .setSampleRate(441000) 568 * .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 569 * .build()) 570 * .setBufferSize(minBuffSize) 571 * .build(); 572 * </pre> 573 * <p> 574 * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)}, 575 * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used. 576 * <br>If the audio format is not specified or is incomplete, its sample rate will be the 577 * default output sample rate of the device (see 578 * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be 579 * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be 580 * {@link AudioFormat#ENCODING_PCM_16BIT}. 581 * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)}, 582 * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used. 583 * <br>If the transfer mode is not specified with {@link #setTransferMode(int)}, 584 * <code>MODE_STREAM</code> will be used. 585 * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will 586 * be generated. 
587 */ 588 public static class Builder { 589 private AudioAttributes mAttributes; 590 private AudioFormat mFormat; 591 private int mBufferSizeInBytes; 592 private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE; 593 private int mMode = MODE_STREAM; 594 595 /** 596 * Constructs a new Builder with the default values as described above. 597 */ 598 public Builder() { 599 } 600 601 /** 602 * Sets the {@link AudioAttributes}. 603 * @param attributes a non-null {@link AudioAttributes} instance that describes the audio 604 * data to be played. 605 * @return the same Builder instance. 606 * @throws IllegalArgumentException 607 */ 608 public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes) 609 throws IllegalArgumentException { 610 if (attributes == null) { 611 throw new IllegalArgumentException("Illegal null AudioAttributes argument"); 612 } 613 // keep reference, we only copy the data when building 614 mAttributes = attributes; 615 return this; 616 } 617 618 /** 619 * Sets the format of the audio data to be played by the {@link AudioTrack}. 620 * See {@link AudioFormat.Builder} for configuring the audio format parameters such 621 * as encoding, channel mask and sample rate. 622 * @param format a non-null {@link AudioFormat} instance. 623 * @return the same Builder instance. 624 * @throws IllegalArgumentException 625 */ 626 public @NonNull Builder setAudioFormat(@NonNull AudioFormat format) 627 throws IllegalArgumentException { 628 if (format == null) { 629 throw new IllegalArgumentException("Illegal null AudioFormat argument"); 630 } 631 // keep reference, we only copy the data when building 632 mFormat = format; 633 return this; 634 } 635 636 /** 637 * Sets the total size (in bytes) of the buffer where audio data is read from for playback. 638 * If using the {@link AudioTrack} in streaming mode 639 * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller 640 * chunks than this size. 
See {@link #getMinBufferSize(int, int, int)} to determine 641 * the minimum required buffer size for the successful creation of an AudioTrack instance 642 * in streaming mode. Using values smaller than <code>getMinBufferSize()</code> will result 643 * in an exception when trying to build the <code>AudioTrack</code>. 644 * <br>If using the <code>AudioTrack</code> in static mode (see 645 * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be 646 * played by this instance. 647 * @param bufferSizeInBytes 648 * @return the same Builder instance. 649 * @throws IllegalArgumentException 650 */ 651 public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes) 652 throws IllegalArgumentException { 653 if (bufferSizeInBytes <= 0) { 654 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes); 655 } 656 mBufferSizeInBytes = bufferSizeInBytes; 657 return this; 658 } 659 660 /** 661 * Sets the mode under which buffers of audio data are transferred from the 662 * {@link AudioTrack} to the framework. 663 * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}. 664 * @return the same Builder instance. 665 * @throws IllegalArgumentException 666 */ 667 public @NonNull Builder setTransferMode(@TransferMode int mode) 668 throws IllegalArgumentException { 669 switch(mode) { 670 case MODE_STREAM: 671 case MODE_STATIC: 672 mMode = mode; 673 break; 674 default: 675 throw new IllegalArgumentException("Invalid transfer mode " + mode); 676 } 677 return this; 678 } 679 680 /** 681 * Sets the session ID the {@link AudioTrack} will be attached to. 682 * @param sessionId a strictly positive ID number retrieved from another 683 * <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by 684 * {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or 685 * {@link AudioManager#AUDIO_SESSION_ID_GENERATE}. 686 * @return the same Builder instance. 
687 * @throws IllegalArgumentException 688 */ 689 public @NonNull Builder setSessionId(int sessionId) 690 throws IllegalArgumentException { 691 if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) { 692 throw new IllegalArgumentException("Invalid audio session ID " + sessionId); 693 } 694 mSessionId = sessionId; 695 return this; 696 } 697 698 /** 699 * Builds an {@link AudioTrack} instance initialized with all the parameters set 700 * on this <code>Builder</code>. 701 * @return a new successfully initialized {@link AudioTrack} instance. 702 * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code> 703 * were incompatible, or if they are not supported by the device, 704 * or if the device was not available. 705 */ 706 public @NonNull AudioTrack build() throws UnsupportedOperationException { 707 if (mAttributes == null) { 708 mAttributes = new AudioAttributes.Builder() 709 .setUsage(AudioAttributes.USAGE_MEDIA) 710 .build(); 711 } 712 if (mFormat == null) { 713 mFormat = new AudioFormat.Builder() 714 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 715 .setSampleRate(AudioSystem.getPrimaryOutputSamplingRate()) 716 .setEncoding(AudioFormat.ENCODING_DEFAULT) 717 .build(); 718 } 719 try { 720 // If the buffer size is not specified in streaming mode, 721 // use a single frame for the buffer size and let the 722 // native code figure out the minimum buffer size. 
723 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) { 724 mBufferSizeInBytes = mFormat.getChannelCount() 725 * mFormat.getBytesPerSample(mFormat.getEncoding()); 726 } 727 final AudioTrack track = new AudioTrack( 728 mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId); 729 if (track.getState() == STATE_UNINITIALIZED) { 730 // release is not necessary 731 throw new UnsupportedOperationException("Cannot create AudioTrack"); 732 } 733 return track; 734 } catch (IllegalArgumentException e) { 735 throw new UnsupportedOperationException(e.getMessage()); 736 } 737 } 738 } 739 740 // mask of all the positional channels supported, however the allowed combinations 741 // are further restricted by the matching left/right rule and CHANNEL_COUNT_MAX 742 private static final int SUPPORTED_OUT_CHANNELS = 743 AudioFormat.CHANNEL_OUT_FRONT_LEFT | 744 AudioFormat.CHANNEL_OUT_FRONT_RIGHT | 745 AudioFormat.CHANNEL_OUT_FRONT_CENTER | 746 AudioFormat.CHANNEL_OUT_LOW_FREQUENCY | 747 AudioFormat.CHANNEL_OUT_BACK_LEFT | 748 AudioFormat.CHANNEL_OUT_BACK_RIGHT | 749 AudioFormat.CHANNEL_OUT_BACK_CENTER | 750 AudioFormat.CHANNEL_OUT_SIDE_LEFT | 751 AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 752 753 // Convenience method for the constructor's parameter checks. 
754 // This is where constructor IllegalArgumentException-s are thrown 755 // postconditions: 756 // mChannelCount is valid 757 // mChannelMask is valid 758 // mAudioFormat is valid 759 // mSampleRate is valid 760 // mDataLoadMode is valid 761 private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask, 762 int audioFormat, int mode) { 763 //-------------- 764 // sample rate, note these values are subject to change 765 if (sampleRateInHz < SAMPLE_RATE_HZ_MIN || sampleRateInHz > SAMPLE_RATE_HZ_MAX) { 766 throw new IllegalArgumentException(sampleRateInHz 767 + "Hz is not a supported sample rate."); 768 } 769 mSampleRate = sampleRateInHz; 770 771 //-------------- 772 // channel config 773 mChannelConfiguration = channelConfig; 774 775 switch (channelConfig) { 776 case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT 777 case AudioFormat.CHANNEL_OUT_MONO: 778 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 779 mChannelCount = 1; 780 mChannelMask = AudioFormat.CHANNEL_OUT_MONO; 781 break; 782 case AudioFormat.CHANNEL_OUT_STEREO: 783 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 784 mChannelCount = 2; 785 mChannelMask = AudioFormat.CHANNEL_OUT_STEREO; 786 break; 787 default: 788 if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) { 789 mChannelCount = 0; 790 break; // channel index configuration only 791 } 792 if (!isMultichannelConfigSupported(channelConfig)) { 793 // input channel configuration features unsupported channels 794 throw new IllegalArgumentException("Unsupported channel configuration."); 795 } 796 mChannelMask = channelConfig; 797 mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 798 } 799 // check the channel index configuration (if present) 800 mChannelIndexMask = channelIndexMask; 801 if (mChannelIndexMask != 0) { 802 // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2 803 final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1; 804 if 
((channelIndexMask & ~indexMask) != 0) { 805 throw new IllegalArgumentException("Unsupported channel index configuration " 806 + channelIndexMask); 807 } 808 int channelIndexCount = Integer.bitCount(channelIndexMask); 809 if (mChannelCount == 0) { 810 mChannelCount = channelIndexCount; 811 } else if (mChannelCount != channelIndexCount) { 812 throw new IllegalArgumentException("Channel count must match"); 813 } 814 } 815 816 //-------------- 817 // audio format 818 if (audioFormat == AudioFormat.ENCODING_DEFAULT) { 819 audioFormat = AudioFormat.ENCODING_PCM_16BIT; 820 } 821 822 if (!AudioFormat.isPublicEncoding(audioFormat)) { 823 throw new IllegalArgumentException("Unsupported audio encoding."); 824 } 825 mAudioFormat = audioFormat; 826 827 //-------------- 828 // audio load mode 829 if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) || 830 ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) { 831 throw new IllegalArgumentException("Invalid mode."); 832 } 833 mDataLoadMode = mode; 834 } 835 836 /** 837 * Convenience method to check that the channel configuration (a.k.a channel mask) is supported 838 * @param channelConfig the mask to validate 839 * @return false if the AudioTrack can't be used with such a mask 840 */ 841 private static boolean isMultichannelConfigSupported(int channelConfig) { 842 // check for unsupported channels 843 if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) { 844 loge("Channel configuration features unsupported channels"); 845 return false; 846 } 847 final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 848 if (channelCount > CHANNEL_COUNT_MAX) { 849 loge("Channel configuration contains too many channels " + 850 channelCount + ">" + CHANNEL_COUNT_MAX); 851 return false; 852 } 853 // check for unsupported multichannel combinations: 854 // - FL/FR must be present 855 // - L/R channels must be paired (e.g. 
no single L channel) 856 final int frontPair = 857 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT; 858 if ((channelConfig & frontPair) != frontPair) { 859 loge("Front channels must be present in multichannel configurations"); 860 return false; 861 } 862 final int backPair = 863 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT; 864 if ((channelConfig & backPair) != 0) { 865 if ((channelConfig & backPair) != backPair) { 866 loge("Rear channels can't be used independently"); 867 return false; 868 } 869 } 870 final int sidePair = 871 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 872 if ((channelConfig & sidePair) != 0 873 && (channelConfig & sidePair) != sidePair) { 874 loge("Side channels can't be used independently"); 875 return false; 876 } 877 return true; 878 } 879 880 881 // Convenience method for the constructor's audio buffer size check. 882 // preconditions: 883 // mChannelCount is valid 884 // mAudioFormat is valid 885 // postcondition: 886 // mNativeBufferSizeInBytes is valid (multiple of frame size, positive) 887 private void audioBuffSizeCheck(int audioBufferSize) { 888 // NB: this section is only valid with PCM or IEC61937 data. 889 // To update when supporting compressed formats 890 int frameSizeInBytes; 891 if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) { 892 frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat); 893 } else { 894 frameSizeInBytes = 1; 895 } 896 if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) { 897 throw new IllegalArgumentException("Invalid audio buffer size."); 898 } 899 900 mNativeBufferSizeInBytes = audioBufferSize; 901 mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes; 902 } 903 904 905 /** 906 * Releases the native AudioTrack resources. 907 */ 908 public void release() { 909 // even though native_release() stops the native AudioTrack, we need to stop 910 // AudioTrack subclasses too. 
911 try { 912 stop(); 913 } catch(IllegalStateException ise) { 914 // don't raise an exception, we're releasing the resources. 915 } 916 native_release(); 917 mState = STATE_UNINITIALIZED; 918 } 919 920 @Override 921 protected void finalize() { 922 native_finalize(); 923 } 924 925 //-------------------------------------------------------------------------- 926 // Getters 927 //-------------------- 928 /** 929 * Returns the minimum gain value, which is the constant 0.0. 930 * Gain values less than 0.0 will be clamped to 0.0. 931 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 932 * @return the minimum value, which is the constant 0.0. 933 */ 934 static public float getMinVolume() { 935 return GAIN_MIN; 936 } 937 938 /** 939 * Returns the maximum gain value, which is greater than or equal to 1.0. 940 * Gain values greater than the maximum will be clamped to the maximum. 941 * <p>The word "volume" in the API name is historical; this is actually a gain. 942 * expressed as a linear multiplier on sample values, where a maximum value of 1.0 943 * corresponds to a gain of 0 dB (sample values left unmodified). 944 * @return the maximum value, which is greater than or equal to 1.0. 945 */ 946 static public float getMaxVolume() { 947 return GAIN_MAX; 948 } 949 950 /** 951 * Returns the configured audio data sample rate in Hz 952 */ 953 public int getSampleRate() { 954 return mSampleRate; 955 } 956 957 /** 958 * Returns the current playback sample rate rate in Hz. 959 */ 960 public int getPlaybackRate() { 961 return native_get_playback_rate(); 962 } 963 964 /** 965 * Returns the current playback parameters. 966 * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters 967 * @return current {@link PlaybackParams}. 968 * @throws IllegalStateException if track is not initialized. 
969 */ 970 public @NonNull PlaybackParams getPlaybackParams() { 971 return native_get_playback_params(); 972 } 973 974 /** 975 * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT}, 976 * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}. 977 */ 978 public int getAudioFormat() { 979 return mAudioFormat; 980 } 981 982 /** 983 * Returns the type of audio stream this AudioTrack is configured for. 984 * Compare the result against {@link AudioManager#STREAM_VOICE_CALL}, 985 * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING}, 986 * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM}, 987 * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}. 988 */ 989 public int getStreamType() { 990 return mStreamType; 991 } 992 993 /** 994 * Returns the configured channel position mask. 995 * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO}, 996 * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}. 997 * This method may return {@link AudioFormat#CHANNEL_INVALID} if 998 * a channel index mask was used. Consider 999 * {@link #getFormat()} instead, to obtain an {@link AudioFormat}, 1000 * which contains both the channel position mask and the channel index mask. 1001 */ 1002 public int getChannelConfiguration() { 1003 return mChannelConfiguration; 1004 } 1005 1006 /** 1007 * Returns the configured <code>AudioTrack</code> format. 1008 * @return an {@link AudioFormat} containing the 1009 * <code>AudioTrack</code> parameters at the time of configuration. 
1010 */ 1011 public @NonNull AudioFormat getFormat() { 1012 AudioFormat.Builder builder = new AudioFormat.Builder() 1013 .setSampleRate(mSampleRate) 1014 .setEncoding(mAudioFormat); 1015 if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) { 1016 builder.setChannelMask(mChannelConfiguration); 1017 } 1018 if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) { 1019 builder.setChannelIndexMask(mChannelIndexMask); 1020 } 1021 return builder.build(); 1022 } 1023 1024 /** 1025 * Returns the configured number of channels. 1026 */ 1027 public int getChannelCount() { 1028 return mChannelCount; 1029 } 1030 1031 /** 1032 * Returns the state of the AudioTrack instance. This is useful after the 1033 * AudioTrack instance has been created to check if it was initialized 1034 * properly. This ensures that the appropriate resources have been acquired. 1035 * @see #STATE_UNINITIALIZED 1036 * @see #STATE_INITIALIZED 1037 * @see #STATE_NO_STATIC_DATA 1038 */ 1039 public int getState() { 1040 return mState; 1041 } 1042 1043 /** 1044 * Returns the playback state of the AudioTrack instance. 1045 * @see #PLAYSTATE_STOPPED 1046 * @see #PLAYSTATE_PAUSED 1047 * @see #PLAYSTATE_PLAYING 1048 */ 1049 public int getPlayState() { 1050 synchronized (mPlayStateLock) { 1051 return mPlayState; 1052 } 1053 } 1054 1055 1056 /** 1057 * Returns the effective size of the <code>AudioTrack</code> buffer 1058 * that the application writes to. 1059 * <p> This will be less than or equal to the result of 1060 * {@link #getBufferCapacityInFrames()}. 1061 * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called. 1062 * <p> If the track is subsequently routed to a different output sink, the buffer 1063 * size and capacity may enlarge to accommodate. 1064 * <p> If the <code>AudioTrack</code> encoding indicates compressed data, 1065 * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is 1066 * the size of the native <code>AudioTrack</code> buffer in bytes. 
1067 * <p> See also {@link AudioManager#getProperty(String)} for key 1068 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}. 1069 * @return current size in frames of the <code>AudioTrack</code> buffer. 1070 * @throws IllegalStateException 1071 */ 1072 public int getBufferSizeInFrames() { 1073 return native_get_buffer_size_frames(); 1074 } 1075 1076 /** 1077 * Limits the effective size of the <code>AudioTrack</code> buffer 1078 * that the application writes to. 1079 * <p> A write to this AudioTrack will not fill the buffer beyond this limit. 1080 * If a blocking write is used then the write will block until the the data 1081 * can fit within this limit. 1082 * <p>Changing this limit modifies the latency associated with 1083 * the buffer for this track. A smaller size will give lower latency 1084 * but there may be more glitches due to buffer underruns. 1085 * <p>The actual size used may not be equal to this requested size. 1086 * It will be limited to a valid range with a maximum of 1087 * {@link #getBufferCapacityInFrames()}. 1088 * It may also be adjusted slightly for internal reasons. 1089 * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE} 1090 * will be returned. 1091 * <p>This method is only supported for PCM audio. 1092 * It is not supported for compressed audio tracks. 1093 * 1094 * @param bufferSizeInFrames requested buffer size 1095 * @return the actual buffer size in frames or an error code, 1096 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION} 1097 * @throws IllegalStateException 1098 */ 1099 public int setBufferSizeInFrames(int bufferSizeInFrames) { 1100 if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) { 1101 return ERROR_INVALID_OPERATION; 1102 } 1103 if (bufferSizeInFrames < 0) { 1104 return ERROR_BAD_VALUE; 1105 } 1106 return native_set_buffer_size_frames(bufferSizeInFrames); 1107 } 1108 1109 /** 1110 * Returns the maximum size of the native <code>AudioTrack</code> buffer. 
1111 * <p> If the track's creation mode is {@link #MODE_STATIC}, 1112 * it is equal to the specified bufferSizeInBytes on construction, converted to frame units. 1113 * A static track's native frame count will not change. 1114 * <p> If the track's creation mode is {@link #MODE_STREAM}, 1115 * it is greater than or equal to the specified bufferSizeInBytes converted to frame units. 1116 * For streaming tracks, this value may be rounded up to a larger value if needed by 1117 * the target output sink, and 1118 * if the track is subsequently routed to a different output sink, the native 1119 * frame count may enlarge to accommodate. 1120 * <p> If the <code>AudioTrack</code> encoding indicates compressed data, 1121 * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is 1122 * the size of the native <code>AudioTrack</code> buffer in bytes. 1123 * <p> See also {@link AudioManager#getProperty(String)} for key 1124 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}. 1125 * @return maximum size in frames of the <code>AudioTrack</code> buffer. 1126 * @throws IllegalStateException 1127 */ 1128 public int getBufferCapacityInFrames() { 1129 return native_get_buffer_capacity_frames(); 1130 } 1131 1132 /** 1133 * Returns the frame count of the native <code>AudioTrack</code> buffer. 1134 * @return current size in frames of the <code>AudioTrack</code> buffer. 1135 * @throws IllegalStateException 1136 * @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead. 1137 */ 1138 @Deprecated 1139 protected int getNativeFrameCount() { 1140 return native_get_buffer_capacity_frames(); 1141 } 1142 1143 /** 1144 * Returns marker position expressed in frames. 1145 * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition}, 1146 * or zero if marker is disabled. 
1147 */ 1148 public int getNotificationMarkerPosition() { 1149 return native_get_marker_pos(); 1150 } 1151 1152 /** 1153 * Returns the notification update period expressed in frames. 1154 * Zero means that no position update notifications are being delivered. 1155 */ 1156 public int getPositionNotificationPeriod() { 1157 return native_get_pos_update_period(); 1158 } 1159 1160 /** 1161 * Returns the playback head position expressed in frames. 1162 * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is 1163 * unsigned 32-bits. That is, the next position after 0x7FFFFFFF is (int) 0x80000000. 1164 * This is a continuously advancing counter. It will wrap (overflow) periodically, 1165 * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz. 1166 * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}. 1167 * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates 1168 * the total number of frames played since reset, 1169 * <i>not</i> the current offset within the buffer. 1170 */ 1171 public int getPlaybackHeadPosition() { 1172 return native_get_position(); 1173 } 1174 1175 /** 1176 * Returns this track's estimated latency in milliseconds. This includes the latency due 1177 * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver. 1178 * 1179 * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need 1180 * a better solution. 1181 * @hide 1182 */ 1183 public int getLatency() { 1184 return native_get_latency(); 1185 } 1186 1187 /** 1188 * Returns the number of underrun occurrences in the application-level write buffer 1189 * since the AudioTrack was created. 1190 * An underrun occurs if the application does not write audio 1191 * data quickly enough, causing the buffer to underflow 1192 * and a potential audio glitch or pop. 1193 * <p> 1194 * Underruns are less likely when buffer sizes are large. 
1195 * It may be possible to eliminate underruns by recreating the AudioTrack with 1196 * a larger buffer. 1197 * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the 1198 * effective size of the buffer. 1199 */ 1200 public int getUnderrunCount() { 1201 return native_get_underrun_count(); 1202 } 1203 1204 /** 1205 * Returns the output sample rate in Hz for the specified stream type. 1206 */ 1207 static public int getNativeOutputSampleRate(int streamType) { 1208 return native_get_output_sample_rate(streamType); 1209 } 1210 1211 /** 1212 * Returns the minimum buffer size required for the successful creation of an AudioTrack 1213 * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't 1214 * guarantee a smooth playback under load, and higher values should be chosen according to 1215 * the expected frequency at which the buffer will be refilled with additional data to play. 1216 * For example, if you intend to dynamically set the source sample rate of an AudioTrack 1217 * to a higher value than the initial source sample rate, be sure to configure the buffer size 1218 * based on the highest planned sample rate. 1219 * @param sampleRateInHz the source sample rate expressed in Hz. 1220 * @param channelConfig describes the configuration of the audio channels. 1221 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 1222 * {@link AudioFormat#CHANNEL_OUT_STEREO} 1223 * @param audioFormat the format in which the audio data is represented. 1224 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 1225 * {@link AudioFormat#ENCODING_PCM_8BIT}, 1226 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 1227 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed, 1228 * or {@link #ERROR} if unable to query for output properties, 1229 * or the minimum buffer size expressed in bytes. 
1230 */ 1231 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) { 1232 int channelCount = 0; 1233 switch(channelConfig) { 1234 case AudioFormat.CHANNEL_OUT_MONO: 1235 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1236 channelCount = 1; 1237 break; 1238 case AudioFormat.CHANNEL_OUT_STEREO: 1239 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1240 channelCount = 2; 1241 break; 1242 default: 1243 if (!isMultichannelConfigSupported(channelConfig)) { 1244 loge("getMinBufferSize(): Invalid channel configuration."); 1245 return ERROR_BAD_VALUE; 1246 } else { 1247 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1248 } 1249 } 1250 1251 if (!AudioFormat.isPublicEncoding(audioFormat)) { 1252 loge("getMinBufferSize(): Invalid audio format."); 1253 return ERROR_BAD_VALUE; 1254 } 1255 1256 // sample rate, note these values are subject to change 1257 if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) { 1258 loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate."); 1259 return ERROR_BAD_VALUE; 1260 } 1261 1262 int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat); 1263 if (size <= 0) { 1264 loge("getMinBufferSize(): error querying hardware"); 1265 return ERROR; 1266 } 1267 else { 1268 return size; 1269 } 1270 } 1271 1272 /** 1273 * Returns the audio session ID. 1274 * 1275 * @return the ID of the audio session this AudioTrack belongs to. 1276 */ 1277 public int getAudioSessionId() { 1278 return mSessionId; 1279 } 1280 1281 /** 1282 * Poll for a timestamp on demand. 1283 * <p> 1284 * If you need to track timestamps during initial warmup or after a routing or mode change, 1285 * you should request a new timestamp periodically until the reported timestamps 1286 * show that the frame position is advancing, or until it becomes clear that 1287 * timestamps are unavailable for this route. 
1288 * <p> 1289 * After the clock is advancing at a stable rate, 1290 * query for a new timestamp approximately once every 10 seconds to once per minute. 1291 * Calling this method more often is inefficient. 1292 * It is also counter-productive to call this method more often than recommended, 1293 * because the short-term differences between successive timestamp reports are not meaningful. 1294 * If you need a high-resolution mapping between frame position and presentation time, 1295 * consider implementing that at application level, based on low-resolution timestamps. 1296 * <p> 1297 * The audio data at the returned position may either already have been 1298 * presented, or may have not yet been presented but is committed to be presented. 1299 * It is not possible to request the time corresponding to a particular position, 1300 * or to request the (fractional) position corresponding to a particular time. 1301 * If you need such features, consider implementing them at application level. 1302 * 1303 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1304 * and owned by caller. 1305 * @return true if a timestamp is available, or false if no timestamp is available. 1306 * If a timestamp if available, 1307 * the AudioTimestamp instance is filled in with a position in frame units, together 1308 * with the estimated time when that frame was presented or is committed to 1309 * be presented. 1310 * In the case that no timestamp is available, any supplied instance is left unaltered. 1311 * A timestamp may be temporarily unavailable while the audio clock is stabilizing, 1312 * or during and immediately after a route change. 1313 * A timestamp is permanently unavailable for a given route if the route does not support 1314 * timestamps. In this case, the approximate frame position can be obtained 1315 * using {@link #getPlaybackHeadPosition}. 
1316 * However, it may be useful to continue to query for 1317 * timestamps occasionally, to recover after a route change. 1318 */ 1319 // Add this text when the "on new timestamp" API is added: 1320 // Use if you need to get the most recent timestamp outside of the event callback handler. 1321 public boolean getTimestamp(AudioTimestamp timestamp) 1322 { 1323 if (timestamp == null) { 1324 throw new IllegalArgumentException(); 1325 } 1326 // It's unfortunate, but we have to either create garbage every time or use synchronized 1327 long[] longArray = new long[2]; 1328 int ret = native_get_timestamp(longArray); 1329 if (ret != SUCCESS) { 1330 return false; 1331 } 1332 timestamp.framePosition = longArray[0]; 1333 timestamp.nanoTime = longArray[1]; 1334 return true; 1335 } 1336 1337 /** 1338 * Poll for a timestamp on demand. 1339 * <p> 1340 * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code. 1341 * 1342 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1343 * and owned by caller. 1344 * @return {@link #SUCCESS} if a timestamp is available 1345 * {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called 1346 * immediately after start/ACTIVE, when the number of frames consumed is less than the 1347 * overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll 1348 * again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time 1349 * for the timestamp. 1350 * {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1351 * needs to be recreated. 1352 * {@link #ERROR_INVALID_OPERATION} if current route does not support 1353 * timestamps. In this case, the approximate frame position can be obtained 1354 * using {@link #getPlaybackHeadPosition}. 1355 * 1356 * The AudioTimestamp instance is filled in with a position in frame units, together 1357 * with the estimated time when that frame was presented or is committed to 1358 * be presented. 
1359 * @hide 1360 */ 1361 // Add this text when the "on new timestamp" API is added: 1362 // Use if you need to get the most recent timestamp outside of the event callback handler. 1363 public int getTimestampWithStatus(AudioTimestamp timestamp) 1364 { 1365 if (timestamp == null) { 1366 throw new IllegalArgumentException(); 1367 } 1368 // It's unfortunate, but we have to either create garbage every time or use synchronized 1369 long[] longArray = new long[2]; 1370 int ret = native_get_timestamp(longArray); 1371 timestamp.framePosition = longArray[0]; 1372 timestamp.nanoTime = longArray[1]; 1373 return ret; 1374 } 1375 1376 //-------------------------------------------------------------------------- 1377 // Initialization / configuration 1378 //-------------------- 1379 /** 1380 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1381 * for each periodic playback head position update. 1382 * Notifications will be received in the same thread as the one in which the AudioTrack 1383 * instance was created. 1384 * @param listener 1385 */ 1386 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) { 1387 setPlaybackPositionUpdateListener(listener, null); 1388 } 1389 1390 /** 1391 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1392 * for each periodic playback head position update. 1393 * Use this method to receive AudioTrack events in the Handler associated with another 1394 * thread than the one in which you created the AudioTrack instance. 1395 * @param listener 1396 * @param handler the Handler that will receive the event notification messages. 
1397 */ 1398 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, 1399 Handler handler) { 1400 if (listener != null) { 1401 mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler); 1402 } else { 1403 mEventHandlerDelegate = null; 1404 } 1405 } 1406 1407 1408 private static float clampGainOrLevel(float gainOrLevel) { 1409 if (Float.isNaN(gainOrLevel)) { 1410 throw new IllegalArgumentException(); 1411 } 1412 if (gainOrLevel < GAIN_MIN) { 1413 gainOrLevel = GAIN_MIN; 1414 } else if (gainOrLevel > GAIN_MAX) { 1415 gainOrLevel = GAIN_MAX; 1416 } 1417 return gainOrLevel; 1418 } 1419 1420 1421 /** 1422 * Sets the specified left and right output gain values on the AudioTrack. 1423 * <p>Gain values are clamped to the closed interval [0.0, max] where 1424 * max is the value of {@link #getMaxVolume}. 1425 * A value of 0.0 results in zero gain (silence), and 1426 * a value of 1.0 means unity gain (signal unchanged). 1427 * The default value is 1.0 meaning unity gain. 1428 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1429 * @param leftGain output gain for the left channel. 1430 * @param rightGain output gain for the right channel 1431 * @return error code or success, see {@link #SUCCESS}, 1432 * {@link #ERROR_INVALID_OPERATION} 1433 * @deprecated Applications should use {@link #setVolume} instead, as it 1434 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 1435 */ 1436 public int setStereoVolume(float leftGain, float rightGain) { 1437 if (isRestricted()) { 1438 return SUCCESS; 1439 } 1440 if (mState == STATE_UNINITIALIZED) { 1441 return ERROR_INVALID_OPERATION; 1442 } 1443 1444 leftGain = clampGainOrLevel(leftGain); 1445 rightGain = clampGainOrLevel(rightGain); 1446 1447 native_setVolume(leftGain, rightGain); 1448 1449 return SUCCESS; 1450 } 1451 1452 1453 /** 1454 * Sets the specified output gain value on all channels of this track. 
1455 * <p>Gain values are clamped to the closed interval [0.0, max] where 1456 * max is the value of {@link #getMaxVolume}. 1457 * A value of 0.0 results in zero gain (silence), and 1458 * a value of 1.0 means unity gain (signal unchanged). 1459 * The default value is 1.0 meaning unity gain. 1460 * <p>This API is preferred over {@link #setStereoVolume}, as it 1461 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 1462 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1463 * @param gain output gain for all channels. 1464 * @return error code or success, see {@link #SUCCESS}, 1465 * {@link #ERROR_INVALID_OPERATION} 1466 */ 1467 public int setVolume(float gain) { 1468 return setStereoVolume(gain, gain); 1469 } 1470 1471 1472 /** 1473 * Sets the playback sample rate for this track. This sets the sampling rate at which 1474 * the audio data will be consumed and played back 1475 * (as set by the sampleRateInHz parameter in the 1476 * {@link #AudioTrack(int, int, int, int, int, int)} constructor), 1477 * not the original sampling rate of the 1478 * content. For example, setting it to half the sample rate of the content will cause the 1479 * playback to last twice as long, but will also result in a pitch shift down by one octave. 1480 * The valid sample rate range is from 1 Hz to twice the value returned by 1481 * {@link #getNativeOutputSampleRate(int)}. 1482 * Use {@link #setPlaybackParams(PlaybackParams)} for speed control. 1483 * <p> This method may also be used to repurpose an existing <code>AudioTrack</code> 1484 * for playback of content of differing sample rate, 1485 * but with identical encoding and channel mask. 
1486 * @param sampleRateInHz the sample rate expressed in Hz 1487 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1488 * {@link #ERROR_INVALID_OPERATION} 1489 */ 1490 public int setPlaybackRate(int sampleRateInHz) { 1491 if (mState != STATE_INITIALIZED) { 1492 return ERROR_INVALID_OPERATION; 1493 } 1494 if (sampleRateInHz <= 0) { 1495 return ERROR_BAD_VALUE; 1496 } 1497 return native_set_playback_rate(sampleRateInHz); 1498 } 1499 1500 1501 /** 1502 * Sets the playback parameters. 1503 * This method returns failure if it cannot apply the playback parameters. 1504 * One possible cause is that the parameters for speed or pitch are out of range. 1505 * Another possible cause is that the <code>AudioTrack</code> is streaming 1506 * (see {@link #MODE_STREAM}) and the 1507 * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer 1508 * on configuration must be larger than the speed multiplied by the minimum size 1509 * {@link #getMinBufferSize(int, int, int)}) to allow proper playback. 1510 * @param params see {@link PlaybackParams}. In particular, 1511 * speed, pitch, and audio mode should be set. 1512 * @throws IllegalArgumentException if the parameters are invalid or not accepted. 1513 * @throws IllegalStateException if track is not initialized. 1514 */ 1515 public void setPlaybackParams(@NonNull PlaybackParams params) { 1516 if (params == null) { 1517 throw new IllegalArgumentException("params is null"); 1518 } 1519 native_set_playback_params(params); 1520 } 1521 1522 1523 /** 1524 * Sets the position of the notification marker. At most one marker can be active. 1525 * @param markerInFrames marker position in wrapping frame units similar to 1526 * {@link #getPlaybackHeadPosition}, or zero to disable the marker. 1527 * To set a marker at a position which would appear as zero due to wraparound, 1528 * a workaround is to use a non-zero position near zero, such as -1 or 1. 
1529 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1530 * {@link #ERROR_INVALID_OPERATION} 1531 */ 1532 public int setNotificationMarkerPosition(int markerInFrames) { 1533 if (mState == STATE_UNINITIALIZED) { 1534 return ERROR_INVALID_OPERATION; 1535 } 1536 return native_set_marker_pos(markerInFrames); 1537 } 1538 1539 1540 /** 1541 * Sets the period for the periodic notification event. 1542 * @param periodInFrames update period expressed in frames. 1543 * Zero period means no position updates. A negative period is not allowed. 1544 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION} 1545 */ 1546 public int setPositionNotificationPeriod(int periodInFrames) { 1547 if (mState == STATE_UNINITIALIZED) { 1548 return ERROR_INVALID_OPERATION; 1549 } 1550 return native_set_pos_update_period(periodInFrames); 1551 } 1552 1553 1554 /** 1555 * Sets the playback head position within the static buffer. 1556 * The track must be stopped or paused for the position to be changed, 1557 * and must use the {@link #MODE_STATIC} mode. 1558 * @param positionInFrames playback head position within buffer, expressed in frames. 1559 * Zero corresponds to start of buffer. 1560 * The position must not be greater than the buffer size in frames, or negative. 1561 * Though this method and {@link #getPlaybackHeadPosition()} have similar names, 1562 * the position values have different meanings. 1563 * <br> 1564 * If looping is currently enabled and the new position is greater than or equal to the 1565 * loop end marker, the behavior varies by API level: 1566 * as of {@link android.os.Build.VERSION_CODES#M}, 1567 * the looping is first disabled and then the position is set. 1568 * For earlier API levels, the behavior is unspecified. 
1569 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1570 * {@link #ERROR_INVALID_OPERATION} 1571 */ 1572 public int setPlaybackHeadPosition(int positionInFrames) { 1573 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 1574 getPlayState() == PLAYSTATE_PLAYING) { 1575 return ERROR_INVALID_OPERATION; 1576 } 1577 if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) { 1578 return ERROR_BAD_VALUE; 1579 } 1580 return native_set_position(positionInFrames); 1581 } 1582 1583 /** 1584 * Sets the loop points and the loop count. The loop can be infinite. 1585 * Similarly to setPlaybackHeadPosition, 1586 * the track must be stopped or paused for the loop points to be changed, 1587 * and must use the {@link #MODE_STATIC} mode. 1588 * @param startInFrames loop start marker expressed in frames. 1589 * Zero corresponds to start of buffer. 1590 * The start marker must not be greater than or equal to the buffer size in frames, or negative. 1591 * @param endInFrames loop end marker expressed in frames. 1592 * The total buffer size in frames corresponds to end of buffer. 1593 * The end marker must not be greater than the buffer size in frames. 1594 * For looping, the end marker must not be less than or equal to the start marker, 1595 * but to disable looping 1596 * it is permitted for start marker, end marker, and loop count to all be 0. 1597 * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}. 1598 * If the loop period (endInFrames - startInFrames) is too small for the implementation to 1599 * support, 1600 * {@link #ERROR_BAD_VALUE} is returned. 1601 * The loop range is the interval [startInFrames, endInFrames). 1602 * <br> 1603 * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged, 1604 * unless it is greater than or equal to the loop end marker, in which case 1605 * it is forced to the loop start marker. 
1606 * For earlier API levels, the effect on position is unspecified. 1607 * @param loopCount the number of times the loop is looped; must be greater than or equal to -1. 1608 * A value of -1 means infinite looping, and 0 disables looping. 1609 * A value of positive N means to "loop" (go back) N times. For example, 1610 * a value of one means to play the region two times in total. 1611 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1612 * {@link #ERROR_INVALID_OPERATION} 1613 */ 1614 public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) { 1615 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 1616 getPlayState() == PLAYSTATE_PLAYING) { 1617 return ERROR_INVALID_OPERATION; 1618 } 1619 if (loopCount == 0) { 1620 ; // explicitly allowed as an exception to the loop region range check 1621 } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames && 1622 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) { 1623 return ERROR_BAD_VALUE; 1624 } 1625 return native_set_loop(startInFrames, endInFrames, loopCount); 1626 } 1627 1628 /** 1629 * Sets the initialization state of the instance. This method was originally intended to be used 1630 * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state. 1631 * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete. 1632 * @param state the state of the AudioTrack instance 1633 * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack. 1634 */ 1635 @Deprecated 1636 protected void setState(int state) { 1637 mState = state; 1638 } 1639 1640 1641 //--------------------------------------------------------- 1642 // Transport control methods 1643 //-------------------- 1644 /** 1645 * Starts playing an AudioTrack. 
1646 * <p> 1647 * If track's creation mode is {@link #MODE_STATIC}, you must have called one of 1648 * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)}, 1649 * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)}, 1650 * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to 1651 * play(). 1652 * <p> 1653 * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to 1654 * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor). 1655 * If you don't call write() first, or if you call write() but with an insufficient amount of 1656 * data, then the track will be in underrun state at play(). In this case, 1657 * playback will not actually start playing until the data path is filled to a 1658 * device-specific minimum level. This requirement for the path to be filled 1659 * to a minimum level is also true when resuming audio playback after calling stop(). 1660 * Similarly the buffer will need to be filled up again after 1661 * the track underruns due to failure to call write() in a timely manner with sufficient data. 1662 * For portability, an application should prime the data path to the maximum allowed 1663 * by writing data until the write() method returns a short transfer count. 1664 * This allows play() to start immediately, and reduces the chance of underrun. 
1665 * 1666 * @throws IllegalStateException if the track isn't properly initialized 1667 */ 1668 public void play() 1669 throws IllegalStateException { 1670 if (mState != STATE_INITIALIZED) { 1671 throw new IllegalStateException("play() called on uninitialized AudioTrack."); 1672 } 1673 if (isRestricted()) { 1674 setVolume(0); 1675 } 1676 synchronized(mPlayStateLock) { 1677 native_start(); 1678 mPlayState = PLAYSTATE_PLAYING; 1679 } 1680 } 1681 1682 private boolean isRestricted() { 1683 if ((mAttributes.getAllFlags() & AudioAttributes.FLAG_BYPASS_INTERRUPTION_POLICY) != 0) { 1684 return false; 1685 } 1686 try { 1687 final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType); 1688 final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage, 1689 Process.myUid(), ActivityThread.currentPackageName()); 1690 return mode != AppOpsManager.MODE_ALLOWED; 1691 } catch (RemoteException e) { 1692 return false; 1693 } 1694 } 1695 1696 /** 1697 * Stops playing the audio data. 1698 * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing 1699 * after the last buffer that was written has been played. For an immediate stop, use 1700 * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played 1701 * back yet. 1702 * @throws IllegalStateException 1703 */ 1704 public void stop() 1705 throws IllegalStateException { 1706 if (mState != STATE_INITIALIZED) { 1707 throw new IllegalStateException("stop() called on uninitialized AudioTrack."); 1708 } 1709 1710 // stop playing 1711 synchronized(mPlayStateLock) { 1712 native_stop(); 1713 mPlayState = PLAYSTATE_STOPPED; 1714 mAvSyncHeader = null; 1715 mAvSyncBytesRemaining = 0; 1716 } 1717 } 1718 1719 /** 1720 * Pauses the playback of the audio data. Data that has not been played 1721 * back will not be discarded. Subsequent calls to {@link #play} will play 1722 * this data back. See {@link #flush()} to discard this data. 
1723 * 1724 * @throws IllegalStateException 1725 */ 1726 public void pause() 1727 throws IllegalStateException { 1728 if (mState != STATE_INITIALIZED) { 1729 throw new IllegalStateException("pause() called on uninitialized AudioTrack."); 1730 } 1731 //logd("pause()"); 1732 1733 // pause playback 1734 synchronized(mPlayStateLock) { 1735 native_pause(); 1736 mPlayState = PLAYSTATE_PAUSED; 1737 } 1738 } 1739 1740 1741 //--------------------------------------------------------- 1742 // Audio data supply 1743 //-------------------- 1744 1745 /** 1746 * Flushes the audio data currently queued for playback. Any data that has 1747 * been written but not yet presented will be discarded. No-op if not stopped or paused, 1748 * or if the track's creation mode is not {@link #MODE_STREAM}. 1749 * <BR> Note that although data written but not yet presented is discarded, there is no 1750 * guarantee that all of the buffer space formerly used by that data 1751 * is available for a subsequent write. 1752 * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code> 1753 * less than or equal to the total buffer size 1754 * may return a short actual transfer count. 1755 */ 1756 public void flush() { 1757 if (mState == STATE_INITIALIZED) { 1758 // flush the data in native layer 1759 native_flush(); 1760 mAvSyncHeader = null; 1761 mAvSyncBytesRemaining = 0; 1762 } 1763 1764 } 1765 1766 /** 1767 * Writes the audio data to the audio sink for playback (streaming mode), 1768 * or copies audio data for later playback (static buffer mode). 1769 * The format specified in the AudioTrack constructor should be 1770 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 1771 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 1772 * <p> 1773 * In streaming mode, the write will normally block until all the data has been enqueued for 1774 * playback, and will return a full transfer count. 
However, if the track is stopped or paused 1775 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1776 * occurs during the write, then the write may return a short transfer count. 1777 * <p> 1778 * In static buffer mode, copies the data to the buffer starting at offset 0. 1779 * Note that the actual playback of this data might occur after this function returns. 1780 * 1781 * @param audioData the array that holds the data to play. 1782 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 1783 * starts. 1784 * Must not be negative, or cause the data access to go out of bounds of the array. 1785 * @param sizeInBytes the number of bytes to write in audioData after the offset. 1786 * Must not be negative, or cause the data access to go out of bounds of the array. 1787 * @return zero or the positive number of bytes that were written, or 1788 * {@link #ERROR_INVALID_OPERATION} 1789 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1790 * the parameters don't resolve to valid data and indexes, or 1791 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1792 * needs to be recreated. 1793 * The dead object error code is not returned if some data was successfully transferred. 1794 * In this case, the error is returned at the next write(). 1795 * The number of bytes will be a multiple of the frame size in bytes 1796 * not to exceed sizeInBytes. 1797 * 1798 * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code> 1799 * set to {@link #WRITE_BLOCKING}. 1800 */ 1801 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) { 1802 return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING); 1803 } 1804 1805 /** 1806 * Writes the audio data to the audio sink for playback (streaming mode), 1807 * or copies audio data for later playback (static buffer mode). 
1808 * The format specified in the AudioTrack constructor should be 1809 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 1810 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 1811 * <p> 1812 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 1813 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1814 * for playback, and will return a full transfer count. However, if the write mode is 1815 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1816 * interrupts the write by calling stop or pause, or an I/O error 1817 * occurs during the write, then the write may return a short transfer count. 1818 * <p> 1819 * In static buffer mode, copies the data to the buffer starting at offset 0, 1820 * and the write mode is ignored. 1821 * Note that the actual playback of this data might occur after this function returns. 1822 * 1823 * @param audioData the array that holds the data to play. 1824 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 1825 * starts. 1826 * Must not be negative, or cause the data access to go out of bounds of the array. 1827 * @param sizeInBytes the number of bytes to write in audioData after the offset. 1828 * Must not be negative, or cause the data access to go out of bounds of the array. 1829 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1830 * effect in static mode. 1831 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1832 * to the audio sink. 1833 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1834 * queuing as much audio data for playback as possible without blocking. 
1835 * @return zero or the positive number of bytes that were written, or 1836 * {@link #ERROR_INVALID_OPERATION} 1837 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1838 * the parameters don't resolve to valid data and indexes, or 1839 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1840 * needs to be recreated. 1841 * The dead object error code is not returned if some data was successfully transferred. 1842 * In this case, the error is returned at the next write(). 1843 * The number of bytes will be a multiple of the frame size in bytes 1844 * not to exceed sizeInBytes. 1845 */ 1846 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes, 1847 @WriteMode int writeMode) { 1848 1849 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1850 return ERROR_INVALID_OPERATION; 1851 } 1852 1853 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1854 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1855 return ERROR_BAD_VALUE; 1856 } 1857 1858 if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0) 1859 || (offsetInBytes + sizeInBytes < 0) // detect integer overflow 1860 || (offsetInBytes + sizeInBytes > audioData.length)) { 1861 return ERROR_BAD_VALUE; 1862 } 1863 1864 int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat, 1865 writeMode == WRITE_BLOCKING); 1866 1867 if ((mDataLoadMode == MODE_STATIC) 1868 && (mState == STATE_NO_STATIC_DATA) 1869 && (ret > 0)) { 1870 // benign race with respect to other APIs that read mState 1871 mState = STATE_INITIALIZED; 1872 } 1873 1874 return ret; 1875 } 1876 1877 /** 1878 * Writes the audio data to the audio sink for playback (streaming mode), 1879 * or copies audio data for later playback (static buffer mode). 
1880 * The format specified in the AudioTrack constructor should be 1881 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1882 * <p> 1883 * In streaming mode, the write will normally block until all the data has been enqueued for 1884 * playback, and will return a full transfer count. However, if the track is stopped or paused 1885 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1886 * occurs during the write, then the write may return a short transfer count. 1887 * <p> 1888 * In static buffer mode, copies the data to the buffer starting at offset 0. 1889 * Note that the actual playback of this data might occur after this function returns. 1890 * 1891 * @param audioData the array that holds the data to play. 1892 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 1893 * starts. 1894 * Must not be negative, or cause the data access to go out of bounds of the array. 1895 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1896 * Must not be negative, or cause the data access to go out of bounds of the array. 1897 * @return zero or the positive number of shorts that were written, or 1898 * {@link #ERROR_INVALID_OPERATION} 1899 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1900 * the parameters don't resolve to valid data and indexes, or 1901 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1902 * needs to be recreated. 1903 * The dead object error code is not returned if some data was successfully transferred. 1904 * In this case, the error is returned at the next write(). 1905 * The number of shorts will be a multiple of the channel count not to exceed sizeInShorts. 1906 * 1907 * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code> 1908 * set to {@link #WRITE_BLOCKING}. 
1909 */ 1910 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) { 1911 return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING); 1912 } 1913 1914 /** 1915 * Writes the audio data to the audio sink for playback (streaming mode), 1916 * or copies audio data for later playback (static buffer mode). 1917 * The format specified in the AudioTrack constructor should be 1918 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1919 * <p> 1920 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 1921 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1922 * for playback, and will return a full transfer count. However, if the write mode is 1923 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1924 * interrupts the write by calling stop or pause, or an I/O error 1925 * occurs during the write, then the write may return a short transfer count. 1926 * <p> 1927 * In static buffer mode, copies the data to the buffer starting at offset 0. 1928 * Note that the actual playback of this data might occur after this function returns. 1929 * 1930 * @param audioData the array that holds the data to write. 1931 * @param offsetInShorts the offset expressed in shorts in audioData where the data to write 1932 * starts. 1933 * Must not be negative, or cause the data access to go out of bounds of the array. 1934 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1935 * Must not be negative, or cause the data access to go out of bounds of the array. 1936 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1937 * effect in static mode. 1938 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1939 * to the audio sink. 
1940 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1941 * queuing as much audio data for playback as possible without blocking. 1942 * @return zero or the positive number of shorts that were written, or 1943 * {@link #ERROR_INVALID_OPERATION} 1944 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1945 * the parameters don't resolve to valid data and indexes, or 1946 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1947 * needs to be recreated. 1948 * The dead object error code is not returned if some data was successfully transferred. 1949 * In this case, the error is returned at the next write(). 1950 * The number of shorts will be a multiple of the channel count not to exceed sizeInShorts. 1951 */ 1952 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts, 1953 @WriteMode int writeMode) { 1954 1955 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1956 return ERROR_INVALID_OPERATION; 1957 } 1958 1959 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1960 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1961 return ERROR_BAD_VALUE; 1962 } 1963 1964 if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0) 1965 || (offsetInShorts + sizeInShorts < 0) // detect integer overflow 1966 || (offsetInShorts + sizeInShorts > audioData.length)) { 1967 return ERROR_BAD_VALUE; 1968 } 1969 1970 int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat, 1971 writeMode == WRITE_BLOCKING); 1972 1973 if ((mDataLoadMode == MODE_STATIC) 1974 && (mState == STATE_NO_STATIC_DATA) 1975 && (ret > 0)) { 1976 // benign race with respect to other APIs that read mState 1977 mState = STATE_INITIALIZED; 1978 } 1979 1980 return ret; 1981 } 1982 1983 /** 1984 * Writes the audio data to the audio sink for playback (streaming mode), 1985 * or copies audio data 
for later playback (static buffer mode). 1986 * The format specified in the AudioTrack constructor should be 1987 * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array. 1988 * <p> 1989 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 1990 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1991 * for playback, and will return a full transfer count. However, if the write mode is 1992 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1993 * interrupts the write by calling stop or pause, or an I/O error 1994 * occurs during the write, then the write may return a short transfer count. 1995 * <p> 1996 * In static buffer mode, copies the data to the buffer starting at offset 0, 1997 * and the write mode is ignored. 1998 * Note that the actual playback of this data might occur after this function returns. 1999 * 2000 * @param audioData the array that holds the data to write. 2001 * The implementation does not clip for sample values within the nominal range 2002 * [-1.0f, 1.0f], provided that all gains in the audio pipeline are 2003 * less than or equal to unity (1.0f), and in the absence of post-processing effects 2004 * that could add energy, such as reverb. For the convenience of applications 2005 * that compute samples using filters with non-unity gain, 2006 * sample values +3 dB beyond the nominal range are permitted. 2007 * However such values may eventually be limited or clipped, depending on various gains 2008 * and later processing in the audio path. Therefore applications are encouraged 2009 * to provide samples values within the nominal range. 2010 * @param offsetInFloats the offset, expressed as a number of floats, 2011 * in audioData where the data to write starts. 2012 * Must not be negative, or cause the data access to go out of bounds of the array. 
2013 * @param sizeInFloats the number of floats to write in audioData after the offset. 2014 * Must not be negative, or cause the data access to go out of bounds of the array. 2015 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2016 * effect in static mode. 2017 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2018 * to the audio sink. 2019 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2020 * queuing as much audio data for playback as possible without blocking. 2021 * @return zero or the positive number of floats that were written, or 2022 * {@link #ERROR_INVALID_OPERATION} 2023 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 2024 * the parameters don't resolve to valid data and indexes, or 2025 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2026 * needs to be recreated. 2027 * The dead object error code is not returned if some data was successfully transferred. 2028 * In this case, the error is returned at the next write(). 2029 * The number of floats will be a multiple of the channel count not to exceed sizeInFloats. 2030 */ 2031 public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats, 2032 @WriteMode int writeMode) { 2033 2034 if (mState == STATE_UNINITIALIZED) { 2035 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2036 return ERROR_INVALID_OPERATION; 2037 } 2038 2039 if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) { 2040 Log.e(TAG, "AudioTrack.write(float[] ...) 
requires format ENCODING_PCM_FLOAT"); 2041 return ERROR_INVALID_OPERATION; 2042 } 2043 2044 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2045 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2046 return ERROR_BAD_VALUE; 2047 } 2048 2049 if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0) 2050 || (offsetInFloats + sizeInFloats < 0) // detect integer overflow 2051 || (offsetInFloats + sizeInFloats > audioData.length)) { 2052 Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size"); 2053 return ERROR_BAD_VALUE; 2054 } 2055 2056 int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat, 2057 writeMode == WRITE_BLOCKING); 2058 2059 if ((mDataLoadMode == MODE_STATIC) 2060 && (mState == STATE_NO_STATIC_DATA) 2061 && (ret > 0)) { 2062 // benign race with respect to other APIs that read mState 2063 mState = STATE_INITIALIZED; 2064 } 2065 2066 return ret; 2067 } 2068 2069 2070 /** 2071 * Writes the audio data to the audio sink for playback (streaming mode), 2072 * or copies audio data for later playback (static buffer mode). 2073 * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor. 2074 * <p> 2075 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2076 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2077 * for playback, and will return a full transfer count. However, if the write mode is 2078 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2079 * interrupts the write by calling stop or pause, or an I/O error 2080 * occurs during the write, then the write may return a short transfer count. 2081 * <p> 2082 * In static buffer mode, copies the data to the buffer starting at offset 0, 2083 * and the write mode is ignored. 
2084 * Note that the actual playback of this data might occur after this function returns. 2085 * 2086 * @param audioData the buffer that holds the data to write, starting at the position reported 2087 * by <code>audioData.position()</code>. 2088 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 2089 * have been advanced to reflect the amount of data that was successfully written to 2090 * the AudioTrack. 2091 * @param sizeInBytes number of bytes to write. It is recommended but not enforced 2092 * that the number of bytes requested be a multiple of the frame size (sample size in 2093 * bytes multiplied by the channel count). 2094 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 2095 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2096 * effect in static mode. 2097 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2098 * to the audio sink. 2099 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2100 * queuing as much audio data for playback as possible without blocking. 2101 * @return zero or the positive number of bytes that were written, or 2102 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or 2103 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2104 * needs to be recreated. 2105 * The dead object error code is not returned if some data was successfully transferred. 2106 * In this case, the error is returned at the next write(). 
2107 */ 2108 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 2109 @WriteMode int writeMode) { 2110 2111 if (mState == STATE_UNINITIALIZED) { 2112 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2113 return ERROR_INVALID_OPERATION; 2114 } 2115 2116 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2117 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2118 return ERROR_BAD_VALUE; 2119 } 2120 2121 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 2122 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 2123 return ERROR_BAD_VALUE; 2124 } 2125 2126 int ret = 0; 2127 if (audioData.isDirect()) { 2128 ret = native_write_native_bytes(audioData, 2129 audioData.position(), sizeInBytes, mAudioFormat, 2130 writeMode == WRITE_BLOCKING); 2131 } else { 2132 ret = native_write_byte(NioUtils.unsafeArray(audioData), 2133 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 2134 sizeInBytes, mAudioFormat, 2135 writeMode == WRITE_BLOCKING); 2136 } 2137 2138 if ((mDataLoadMode == MODE_STATIC) 2139 && (mState == STATE_NO_STATIC_DATA) 2140 && (ret > 0)) { 2141 // benign race with respect to other APIs that read mState 2142 mState = STATE_INITIALIZED; 2143 } 2144 2145 if (ret > 0) { 2146 audioData.position(audioData.position() + ret); 2147 } 2148 2149 return ret; 2150 } 2151 2152 /** 2153 * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track. 2154 * The blocking behavior will depend on the write mode. 2155 * @param audioData the buffer that holds the data to write, starting at the position reported 2156 * by <code>audioData.position()</code>. 2157 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 2158 * have been advanced to reflect the amount of data that was successfully written to 2159 * the AudioTrack. 
     * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
     *    that the number of bytes requested be a multiple of the frame size (sample size in
     *    bytes multiplied by the channel count).
     *    <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *         to the audio sink.
     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
     * @return zero or a positive number of bytes that were written, or
     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
     *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *     needs to be recreated.
     *     The dead object error code is not returned if some data was successfully transferred.
     *     In this case, the error is returned at the next write().
     */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // The timestamp variant is only meaningful for streaming tracks.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Tracks without the HW AV sync flag ignore the timestamp and fall back
        // to the plain ByteBuffer write.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists
        // The 16-byte big-endian header carries a marker word (0x55550001), the
        // access-unit size, and the timestamp. mAvSyncHeader/mAvSyncBytesRemaining
        // persist across calls so a partially-delivered access unit is resumed
        // on the next write() rather than re-stamped.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(16);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550001);
            mAvSyncHeader.putInt(sizeInBytes);
            mAvSyncHeader.putLong(timestamp);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                // Reset the pending access unit so the next write starts clean.
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Non-blocking write could not take the whole header yet; report
                // zero payload bytes written and retry on the next call.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more than the bytes still owed to the current access unit.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;
        // Access unit fully delivered; a fresh header will be built next call.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader = null;
        }

        return ret;
    }


    /**
     * Sets the playback head position within the static buffer to zero,
     * that is it rewinds to start of static buffer.
     * The track must be stopped or paused, and
     * the track's creation mode must be {@link #MODE_STATIC}.
     * <p>
     * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
     * {@link #getPlaybackHeadPosition()} to zero.
     * For earlier API levels, the reset behavior is unspecified.
     * <p>
     * Use {@link #setPlaybackHeadPosition(int)} with a zero position
     * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *  {@link #ERROR_INVALID_OPERATION}
     */
    public int reloadStaticData() {
        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_reload_static();
    }

    //--------------------------------------------------------------------------
    // Audio effects management
    //--------------------

    /**
     * Attaches an auxiliary effect to the audio track. A typical auxiliary
     * effect is a reverberation effect which can be applied on any sound source
     * that directs a certain amount of its energy to this effect. This amount
     * is defined by setAuxEffectSendLevel().
     * {@see #setAuxEffectSendLevel(float)}.
2283 * <p>After creating an auxiliary effect (e.g. 2284 * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with 2285 * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling 2286 * this method to attach the audio track to the effect. 2287 * <p>To detach the effect from the audio track, call this method with a 2288 * null effect id. 2289 * 2290 * @param effectId system wide unique id of the effect to attach 2291 * @return error code or success, see {@link #SUCCESS}, 2292 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE} 2293 */ 2294 public int attachAuxEffect(int effectId) { 2295 if (mState == STATE_UNINITIALIZED) { 2296 return ERROR_INVALID_OPERATION; 2297 } 2298 return native_attachAuxEffect(effectId); 2299 } 2300 2301 /** 2302 * Sets the send level of the audio track to the attached auxiliary effect 2303 * {@link #attachAuxEffect(int)}. Effect levels 2304 * are clamped to the closed interval [0.0, max] where 2305 * max is the value of {@link #getMaxVolume}. 2306 * A value of 0.0 results in no effect, and a value of 1.0 is full send. 2307 * <p>By default the send level is 0.0f, so even if an effect is attached to the player 2308 * this method must be called for the effect to be applied. 2309 * <p>Note that the passed level value is a linear scalar. 
UI controls should be scaled 2310 * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB, 2311 * so an appropriate conversion from linear UI input x to level is: 2312 * x == 0 -> level = 0 2313 * 0 < x <= R -> level = 10^(72*(x-R)/20/R) 2314 * 2315 * @param level linear send level 2316 * @return error code or success, see {@link #SUCCESS}, 2317 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR} 2318 */ 2319 public int setAuxEffectSendLevel(float level) { 2320 if (isRestricted()) { 2321 return SUCCESS; 2322 } 2323 if (mState == STATE_UNINITIALIZED) { 2324 return ERROR_INVALID_OPERATION; 2325 } 2326 level = clampGainOrLevel(level); 2327 int err = native_setAuxEffectSendLevel(level); 2328 return err == 0 ? SUCCESS : ERROR; 2329 } 2330 2331 //-------------------------------------------------------------------------- 2332 // Explicit Routing 2333 //-------------------- 2334 private AudioDeviceInfo mPreferredDevice = null; 2335 2336 /** 2337 * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route 2338 * the output from this AudioTrack. 2339 * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink. 2340 * If deviceInfo is null, default routing is restored. 2341 * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and 2342 * does not correspond to a valid audio output device. 2343 */ 2344 public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) { 2345 // Do some validation.... 2346 if (deviceInfo != null && !deviceInfo.isSink()) { 2347 return false; 2348 } 2349 int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0; 2350 boolean status = native_setOutputDevice(preferredDeviceId); 2351 if (status == true) { 2352 synchronized (this) { 2353 mPreferredDevice = deviceInfo; 2354 } 2355 } 2356 return status; 2357 } 2358 2359 /** 2360 * Returns the selected output specified by {@link #setPreferredDevice}. 
Note that this 2361 * is not guaranteed to correspond to the actual device being used for playback. 2362 */ 2363 public AudioDeviceInfo getPreferredDevice() { 2364 synchronized (this) { 2365 return mPreferredDevice; 2366 } 2367 } 2368 2369 /** 2370 * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack. 2371 * Note: The query is only valid if the AudioTrack is currently playing. If it is not, 2372 * <code>getRoutedDevice()</code> will return null. 2373 */ 2374 public AudioDeviceInfo getRoutedDevice() { 2375 int deviceId = native_getRoutedDeviceId(); 2376 if (deviceId == 0) { 2377 return null; 2378 } 2379 AudioDeviceInfo[] devices = 2380 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS); 2381 for (int i = 0; i < devices.length; i++) { 2382 if (devices[i].getId() == deviceId) { 2383 return devices[i]; 2384 } 2385 } 2386 return null; 2387 } 2388 2389 /* 2390 * Call BEFORE adding a routing callback handler. 2391 */ 2392 private void testEnableNativeRoutingCallbacks() { 2393 if (mRoutingChangeListeners.size() == 0 && mNewRoutingChangeListeners.size() == 0) { 2394 native_enableDeviceCallback(); 2395 } 2396 } 2397 2398 /* 2399 * Call AFTER removing a routing callback handler. 2400 */ 2401 private void testDisableNativeRoutingCallbacks() { 2402 if (mRoutingChangeListeners.size() == 0 && mNewRoutingChangeListeners.size() == 0) { 2403 native_disableDeviceCallback(); 2404 } 2405 } 2406 2407 //-------------------------------------------------------------------------- 2408 // >= "N" (Re)Routing Info 2409 //-------------------- 2410 /** 2411 * The list of AudioRouting.OnRoutingChangedListener interfaces added (with 2412 * {@link AudioTrack#addOnRoutingListener(AudioRouting.OnRoutingChangedListener, 2413 * android.os.Handler)} 2414 * by an app to receive (re)routing notifications. 
2415 */ 2416 private ArrayMap<AudioRouting.OnRoutingChangedListener, NativeNewRoutingEventHandlerDelegate> 2417 mNewRoutingChangeListeners = 2418 new ArrayMap<AudioRouting.OnRoutingChangedListener, NativeNewRoutingEventHandlerDelegate>(); 2419 2420 /** 2421 * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing 2422 * changes on this AudioTrack. 2423 * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive 2424 * notifications of rerouting events. 2425 * @param handler Specifies the {@link Handler} object for the thread on which to execute 2426 * the callback. If <code>null</code>, the {@link Handler} associated with the main 2427 * {@link Looper} will be used. 2428 */ 2429 public void addOnRoutingListener(AudioRouting.OnRoutingChangedListener listener, 2430 Handler handler) { 2431 if (listener != null && !mNewRoutingChangeListeners.containsKey(listener)) { 2432 synchronized (mNewRoutingChangeListeners) { 2433 testEnableNativeRoutingCallbacks(); 2434 mNewRoutingChangeListeners.put( 2435 listener, new NativeNewRoutingEventHandlerDelegate(this, listener, 2436 handler != null ? handler : new Handler(mInitializationLooper))); 2437 } 2438 } 2439 } 2440 2441 /** 2442 * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added 2443 * to receive rerouting notifications. 2444 * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface 2445 * to remove. 
2446 */ 2447 public void removeOnRoutingListener(AudioRouting.OnRoutingChangedListener listener) { 2448 if (mNewRoutingChangeListeners.containsKey(listener)) { 2449 mNewRoutingChangeListeners.remove(listener); 2450 } 2451 testDisableNativeRoutingCallbacks(); 2452 } 2453 2454 //-------------------------------------------------------------------------- 2455 // Marshmallow (Re)Routing Info 2456 //-------------------- 2457 /** 2458 * Defines the interface by which applications can receive notifications of routing 2459 * changes for the associated {@link AudioTrack}. 2460 */ 2461 @Deprecated 2462 public interface OnRoutingChangedListener { 2463 /** 2464 * Called when the routing of an AudioTrack changes from either and explicit or 2465 * policy rerouting. Use {@link #getRoutedDevice()} to retrieve the newly routed-to 2466 * device. 2467 */ 2468 @Deprecated 2469 public void onRoutingChanged(AudioTrack audioTrack); 2470 } 2471 2472 /** 2473 * The list of AudioTrack.OnRoutingChangedListener interfaces added (with 2474 * {@link AudioTrack#addOnRoutingChangedListener(OnRoutingChangedListener, android.os.Handler)} 2475 * by an app to receive (re)routing notifications. 2476 */ 2477 private ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate> 2478 mRoutingChangeListeners = 2479 new ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate>(); 2480 2481 /** 2482 * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes 2483 * on this AudioTrack. 2484 * @param listener The {@link OnRoutingChangedListener} interface to receive notifications 2485 * of rerouting events. 2486 * @param handler Specifies the {@link Handler} object for the thread on which to execute 2487 * the callback. If <code>null</code>, the {@link Handler} associated with the main 2488 * {@link Looper} will be used. 
2489 */ 2490 @Deprecated 2491 public void addOnRoutingChangedListener(OnRoutingChangedListener listener, 2492 android.os.Handler handler) { 2493 if (listener != null && !mRoutingChangeListeners.containsKey(listener)) { 2494 synchronized (mRoutingChangeListeners) { 2495 testEnableNativeRoutingCallbacks(); 2496 mRoutingChangeListeners.put( 2497 listener, new NativeRoutingEventHandlerDelegate(this, listener, 2498 handler != null ? handler : new Handler(mInitializationLooper))); 2499 } 2500 } 2501 } 2502 2503 /** 2504 * Removes an {@link OnRoutingChangedListener} which has been previously added 2505 * to receive rerouting notifications. 2506 * @param listener The previously added {@link OnRoutingChangedListener} interface to remove. 2507 */ 2508 @Deprecated 2509 public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) { 2510 synchronized (mRoutingChangeListeners) { 2511 if (mRoutingChangeListeners.containsKey(listener)) { 2512 mRoutingChangeListeners.remove(listener); 2513 } 2514 testDisableNativeRoutingCallbacks(); 2515 } 2516 } 2517 2518 /** 2519 * Sends device list change notification to all listeners. 
2520 */ 2521 private void broadcastRoutingChange() { 2522 AudioManager.resetAudioPortGeneration(); 2523 2524 // Marshmallow Routing 2525 Collection<NativeRoutingEventHandlerDelegate> values; 2526 synchronized (mRoutingChangeListeners) { 2527 values = mRoutingChangeListeners.values(); 2528 } 2529 for(NativeRoutingEventHandlerDelegate delegate : values) { 2530 Handler handler = delegate.getHandler(); 2531 if (handler != null) { 2532 handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE); 2533 } 2534 } 2535 // >= "N" Routing 2536 Collection<NativeNewRoutingEventHandlerDelegate> newValues; 2537 synchronized (mNewRoutingChangeListeners) { 2538 newValues = mNewRoutingChangeListeners.values(); 2539 } 2540 for(NativeNewRoutingEventHandlerDelegate delegate : newValues) { 2541 Handler handler = delegate.getHandler(); 2542 if (handler != null) { 2543 handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE); 2544 } 2545 } 2546 } 2547 2548 //--------------------------------------------------------- 2549 // Interface definitions 2550 //-------------------- 2551 /** 2552 * Interface definition for a callback to be invoked when the playback head position of 2553 * an AudioTrack has reached a notification marker or has increased by a certain period. 2554 */ 2555 public interface OnPlaybackPositionUpdateListener { 2556 /** 2557 * Called on the listener to notify it that the previously set marker has been reached 2558 * by the playback head. 2559 */ 2560 void onMarkerReached(AudioTrack track); 2561 2562 /** 2563 * Called on the listener to periodically notify it that the playback head has reached 2564 * a multiple of the notification period. 
2565 */ 2566 void onPeriodicNotification(AudioTrack track); 2567 } 2568 2569 //--------------------------------------------------------- 2570 // Inner classes 2571 //-------------------- 2572 /** 2573 * Helper class to handle the forwarding of native events to the appropriate listener 2574 * (potentially) handled in a different thread 2575 */ 2576 private class NativePositionEventHandlerDelegate { 2577 private final Handler mHandler; 2578 2579 NativePositionEventHandlerDelegate(final AudioTrack track, 2580 final OnPlaybackPositionUpdateListener listener, 2581 Handler handler) { 2582 // find the looper for our new event handler 2583 Looper looper; 2584 if (handler != null) { 2585 looper = handler.getLooper(); 2586 } else { 2587 // no given handler, use the looper the AudioTrack was created in 2588 looper = mInitializationLooper; 2589 } 2590 2591 // construct the event handler with this looper 2592 if (looper != null) { 2593 // implement the event handler delegate 2594 mHandler = new Handler(looper) { 2595 @Override 2596 public void handleMessage(Message msg) { 2597 if (track == null) { 2598 return; 2599 } 2600 switch(msg.what) { 2601 case NATIVE_EVENT_MARKER: 2602 if (listener != null) { 2603 listener.onMarkerReached(track); 2604 } 2605 break; 2606 case NATIVE_EVENT_NEW_POS: 2607 if (listener != null) { 2608 listener.onPeriodicNotification(track); 2609 } 2610 break; 2611 default: 2612 loge("Unknown native event type: " + msg.what); 2613 break; 2614 } 2615 } 2616 }; 2617 } else { 2618 mHandler = null; 2619 } 2620 } 2621 2622 Handler getHandler() { 2623 return mHandler; 2624 } 2625 } 2626 2627 /** 2628 * Marshmallow Routing API. 
2629 * Helper class to handle the forwarding of native events to the appropriate listener 2630 * (potentially) handled in a different thread 2631 */ 2632 private class NativeRoutingEventHandlerDelegate { 2633 private final Handler mHandler; 2634 2635 NativeRoutingEventHandlerDelegate(final AudioTrack track, 2636 final OnRoutingChangedListener listener, 2637 Handler handler) { 2638 // find the looper for our new event handler 2639 Looper looper; 2640 if (handler != null) { 2641 looper = handler.getLooper(); 2642 } else { 2643 // no given handler, use the looper the AudioTrack was created in 2644 looper = mInitializationLooper; 2645 } 2646 2647 // construct the event handler with this looper 2648 if (looper != null) { 2649 // implement the event handler delegate 2650 mHandler = new Handler(looper) { 2651 @Override 2652 public void handleMessage(Message msg) { 2653 if (track == null) { 2654 return; 2655 } 2656 switch(msg.what) { 2657 case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE: 2658 if (listener != null) { 2659 listener.onRoutingChanged(track); 2660 } 2661 break; 2662 default: 2663 loge("Unknown native event type: " + msg.what); 2664 break; 2665 } 2666 } 2667 }; 2668 } else { 2669 mHandler = null; 2670 } 2671 } 2672 2673 Handler getHandler() { 2674 return mHandler; 2675 } 2676 } 2677 2678 /** 2679 * Marshmallow Routing API. 
     * Helper class to handle the forwarding of native events to the appropriate listener
     * (potentially) handled in a different thread
     */
    private class NativeNewRoutingEventHandlerDelegate {
        // Handler bound to the chosen looper; null when no looper could be
        // resolved, in which case routing events are dropped for this listener.
        private final Handler mHandler;

        NativeNewRoutingEventHandlerDelegate(final AudioTrack track,
                final AudioRouting.OnRoutingChangedListener listener,
                Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                        case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE:
                            if (listener != null) {
                                listener.onRoutingChanged(track);
                            }
                            break;
                        default:
                            loge("Unknown native event type: " + msg.what);
                            break;
                        }
                    }
                };
            } else {
                mHandler = null;
            }
        }

        Handler getHandler() {
            return mHandler;
        }
    }

    //---------------------------------------------------------
    // Java methods called from the native side
    //--------------------
    // Entry point for all events posted by the native AudioTrack; runs on a
    // native (binder/callback) thread, so work is re-posted to Java handlers.
    @SuppressWarnings("unused")
    private static void postEventFromNative(Object audiotrack_ref,
            int what, int arg1, int arg2, Object obj) {
        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
        // The native layer only holds a weak reference; the Java track may
        // already have been garbage collected.
        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
        if (track == null) {
            return;
        }

        // Routing events fan out to the routing listener maps rather than to
        // the position-update delegate below.
        if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
            track.broadcastRoutingChange();
            return;
        }
        // Marker/period events go to the playback-position update delegate.
        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
        if (delegate != null) {
            Handler handler = delegate.getHandler();
            if (handler != null) {
                Message m = handler.obtainMessage(what, arg1, arg2, obj);
                handler.sendMessage(m);
            }
        }
    }


    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    // post-condition: mStreamType is overwritten with a value
    // that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    // AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    private native final void native_finalize();

    private native final void native_release();

    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Blocking/non-blocking writes of the various sample formats; each returns
    // the number of elements written or a negative error code.
    private native final int native_write_byte(byte[] audioData,
            int offsetInBytes, int sizeInBytes, int format,
            boolean isBlocking);

    private native final int native_write_short(short[] audioData,
            int offsetInShorts, int sizeInShorts, int format,
            boolean isBlocking);

    private native final int native_write_float(float[] audioData,
            int offsetInFloats, int sizeInFloats, int format,
            boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    // Buffer geometry accessors (frames).
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Marker / periodic position-notification configuration.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Explicit routing / device-callback plumbing.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    static private native int native_get_FCC_8();

    //---------------------------------------------------------
    // Utility methods
    //------------------

    // Debug-level log helper, prefixed with the class TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Error-level log helper, prefixed with the class TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
}