// AudioTrack.java revision 3c86a343dfca1b9e2e28c240dc894f60709e392c
1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17package android.media; 18 19import java.lang.annotation.Retention; 20import java.lang.annotation.RetentionPolicy; 21import java.lang.ref.WeakReference; 22import java.lang.Math; 23import java.nio.ByteBuffer; 24import java.nio.ByteOrder; 25import java.nio.NioUtils; 26import java.util.Collection; 27 28import android.annotation.IntDef; 29import android.annotation.NonNull; 30import android.app.ActivityThread; 31import android.content.Context; 32import android.os.Handler; 33import android.os.IBinder; 34import android.os.Looper; 35import android.os.Message; 36import android.os.Process; 37import android.os.RemoteException; 38import android.os.ServiceManager; 39import android.util.ArrayMap; 40import android.util.Log; 41 42import com.android.internal.annotations.GuardedBy; 43 44/** 45 * The AudioTrack class manages and plays a single audio resource for Java applications. 46 * It allows streaming of PCM audio buffers to the audio sink for playback. This is 47 * achieved by "pushing" the data to the AudioTrack object using one of the 48 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)}, 49 * and {@link #write(float[], int, int, int)} methods. 
50 * 51 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br> 52 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using 53 * one of the {@code write()} methods. These are blocking and return when the data has been 54 * transferred from the Java layer to the native layer and queued for playback. The streaming 55 * mode is most useful when playing blocks of audio data that for instance are: 56 * 57 * <ul> 58 * <li>too big to fit in memory because of the duration of the sound to play,</li> 59 * <li>too big to fit in memory because of the characteristics of the audio data 60 * (high sampling rate, bits per sample ...)</li> 61 * <li>received or generated while previously queued audio is playing.</li> 62 * </ul> 63 * 64 * The static mode should be chosen when dealing with short sounds that fit in memory and 65 * that need to be played with the smallest latency possible. The static mode will 66 * therefore be preferred for UI and game sounds that are played often, and with the 67 * smallest overhead possible. 68 * 69 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer. 70 * The size of this buffer, specified during the construction, determines how long an AudioTrack 71 * can play before running out of data.<br> 72 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can 73 * be played from it.<br> 74 * For the streaming mode, data will be written to the audio sink in chunks of 75 * sizes less than or equal to the total buffer size. 76 * 77 * AudioTrack is not final and thus permits subclasses, but such use is not recommended. 78 */ 79public class AudioTrack extends PlayerBase 80 implements AudioRouting 81{ 82 //--------------------------------------------------------- 83 // Constants 84 //-------------------- 85 /** Minimum value for a linear gain or auxiliary effect level. 86 * This value must be exactly equal to 0.0f; do not change it. 
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Maximum value for AudioTrack channel count
     * @hide public for MediaCodec only, do not un-hide or change to a numeric literal
     */
    public static final int CHANNEL_COUNT_MAX = native_get_FCC_8();

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
142 */ 143 public static final int SUCCESS = AudioSystem.SUCCESS; 144 /** 145 * Denotes a generic operation failure. 146 */ 147 public static final int ERROR = AudioSystem.ERROR; 148 /** 149 * Denotes a failure due to the use of an invalid value. 150 */ 151 public static final int ERROR_BAD_VALUE = AudioSystem.BAD_VALUE; 152 /** 153 * Denotes a failure due to the improper use of a method. 154 */ 155 public static final int ERROR_INVALID_OPERATION = AudioSystem.INVALID_OPERATION; 156 /** 157 * An error code indicating that the object reporting it is no longer valid and needs to 158 * be recreated. 159 * @hide 160 */ 161 public static final int ERROR_DEAD_OBJECT = AudioSystem.DEAD_OBJECT; 162 /** 163 * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state, 164 * or immediately after start/ACTIVE. 165 * @hide 166 */ 167 public static final int ERROR_WOULD_BLOCK = AudioSystem.WOULD_BLOCK; 168 169 // Error codes: 170 // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp 171 private static final int ERROR_NATIVESETUP_AUDIOSYSTEM = -16; 172 private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK = -17; 173 private static final int ERROR_NATIVESETUP_INVALIDFORMAT = -18; 174 private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE = -19; 175 private static final int ERROR_NATIVESETUP_NATIVEINITFAILED = -20; 176 177 // Events: 178 // to keep in sync with frameworks/av/include/media/AudioTrack.h 179 /** 180 * Event id denotes when playback head has reached a previously set marker. 181 */ 182 private static final int NATIVE_EVENT_MARKER = 3; 183 /** 184 * Event id denotes when previously set update period has elapsed during playback. 
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer.
237 * These values are set during construction and can be stale. 238 * To obtain the current native audio buffer frame count use {@link #getBufferSizeInFrames()}. 239 */ 240 private int mNativeBufferSizeInBytes = 0; 241 private int mNativeBufferSizeInFrames = 0; 242 /** 243 * Handler for events coming from the native code. 244 */ 245 private NativePositionEventHandlerDelegate mEventHandlerDelegate; 246 /** 247 * Looper associated with the thread that creates the AudioTrack instance. 248 */ 249 private final Looper mInitializationLooper; 250 /** 251 * The audio data source sampling rate in Hz. 252 * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}. 253 */ 254 private int mSampleRate; // initialized by all constructors via audioParamCheck() 255 /** 256 * The number of audio output channels (1 is mono, 2 is stereo, etc.). 257 */ 258 private int mChannelCount = 1; 259 /** 260 * The audio channel mask used for calling native AudioTrack 261 */ 262 private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO; 263 264 /** 265 * The type of the audio stream to play. See 266 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 267 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 268 * {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and 269 * {@link AudioManager#STREAM_DTMF}. 270 */ 271 private int mStreamType = AudioManager.STREAM_MUSIC; 272 273 /** 274 * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM. 275 */ 276 private int mDataLoadMode = MODE_STREAM; 277 /** 278 * The current channel position mask, as specified on AudioTrack creation. 279 * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}. 280 * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified. 281 */ 282 private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO; 283 /** 284 * The channel index mask if specified, otherwise 0. 
285 */ 286 private int mChannelIndexMask = 0; 287 /** 288 * The encoding of the audio samples. 289 * @see AudioFormat#ENCODING_PCM_8BIT 290 * @see AudioFormat#ENCODING_PCM_16BIT 291 * @see AudioFormat#ENCODING_PCM_FLOAT 292 */ 293 private int mAudioFormat; // initialized by all constructors via audioParamCheck() 294 /** 295 * Audio session ID 296 */ 297 private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE; 298 /** 299 * HW_AV_SYNC track AV Sync Header 300 */ 301 private ByteBuffer mAvSyncHeader = null; 302 /** 303 * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header 304 */ 305 private int mAvSyncBytesRemaining = 0; 306 307 //-------------------------------- 308 // Used exclusively by native code 309 //-------------------- 310 /** 311 * @hide 312 * Accessed by native methods: provides access to C++ AudioTrack object. 313 */ 314 @SuppressWarnings("unused") 315 protected long mNativeTrackInJavaObj; 316 /** 317 * Accessed by native methods: provides access to the JNI data (i.e. resources used by 318 * the native AudioTrack object, but not stored in it). 319 */ 320 @SuppressWarnings("unused") 321 private long mJniData; 322 323 324 //-------------------------------------------------------------------------- 325 // Constructor, Finalize 326 //-------------------- 327 /** 328 * Class constructor. 329 * @param streamType the type of the audio stream. See 330 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 331 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 332 * {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}. 333 * @param sampleRateInHz the initial source sample rate expressed in Hz. 334 * {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value 335 * which is usually the sample rate of the sink. 336 * {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen. 
337 * @param channelConfig describes the configuration of the audio channels. 338 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 339 * {@link AudioFormat#CHANNEL_OUT_STEREO} 340 * @param audioFormat the format in which the audio data is represented. 341 * See {@link AudioFormat#ENCODING_PCM_16BIT}, 342 * {@link AudioFormat#ENCODING_PCM_8BIT}, 343 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 344 * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is 345 * read from for playback. This should be a multiple of the frame size in bytes. 346 * <p> If the track's creation mode is {@link #MODE_STATIC}, 347 * this is the maximum length sample, or audio clip, that can be played by this instance. 348 * <p> If the track's creation mode is {@link #MODE_STREAM}, 349 * this should be the desired buffer size 350 * for the <code>AudioTrack</code> to satisfy the application's 351 * natural latency requirements. 352 * If <code>bufferSizeInBytes</code> is less than the 353 * minimum buffer size for the output sink, it is automatically increased to the minimum 354 * buffer size. 355 * The method {@link #getBufferSizeInFrames()} returns the 356 * actual size in frames of the native buffer created, which 357 * determines the frequency to write 358 * to the streaming <code>AudioTrack</code> to avoid underrun. 359 * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM} 360 * @throws java.lang.IllegalArgumentException 361 */ 362 public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, 363 int bufferSizeInBytes, int mode) 364 throws IllegalArgumentException { 365 this(streamType, sampleRateInHz, channelConfig, audioFormat, 366 bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE); 367 } 368 369 /** 370 * Class constructor with audio session. Use this constructor when the AudioTrack must be 371 * attached to a particular audio session. 
The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session with all other media players or audio tracks in the same session, otherwise a new
     * session will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT}, {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size.
If using the AudioTrack in static mode, 398 * this is the maximum size of the sound that will be played for this instance. 399 * See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size 400 * for the successful creation of an AudioTrack instance in streaming mode. Using values 401 * smaller than getMinBufferSize() will result in an initialization failure. 402 * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM} 403 * @param sessionId Id of audio session the AudioTrack must be attached to 404 * @throws java.lang.IllegalArgumentException 405 */ 406 public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, 407 int bufferSizeInBytes, int mode, int sessionId) 408 throws IllegalArgumentException { 409 // mState already == STATE_UNINITIALIZED 410 this((new AudioAttributes.Builder()) 411 .setLegacyStreamType(streamType) 412 .build(), 413 (new AudioFormat.Builder()) 414 .setChannelMask(channelConfig) 415 .setEncoding(audioFormat) 416 .setSampleRate(sampleRateInHz) 417 .build(), 418 bufferSizeInBytes, 419 mode, sessionId); 420 } 421 422 /** 423 * Class constructor with {@link AudioAttributes} and {@link AudioFormat}. 424 * @param attributes a non-null {@link AudioAttributes} instance. 425 * @param format a non-null {@link AudioFormat} instance describing the format of the data 426 * that will be played through this AudioTrack. See {@link AudioFormat.Builder} for 427 * configuring the audio format parameters such as encoding, channel mask and sample rate. 428 * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read 429 * from for playback. If using the AudioTrack in streaming mode, you can write data into 430 * this buffer in smaller chunks than this size. If using the AudioTrack in static mode, 431 * this is the maximum size of the sound that will be played for this instance. 
     * See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     * for the successful creation of an AudioTrack instance in streaming mode. Using values
     * smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        super(attributes);
        // mState already == STATE_UNINITIALIZED

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            // the native layer uses 0 to represent an unspecified sample rate
            rate = 0;
        }

        // only read the masks the caller actually set on the AudioFormat
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // validates and stores mSampleRate/mChannelMask/mChannelIndexMask/mAudioFormat/mDataLoadMode
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        // attributes-based constructor: no legacy stream type is specified here
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // single-element arrays act as in/out parameters: native_setup writes back
        // the sample rate and session ID actually chosen by the native layer
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        if (mDataLoadMode == MODE_STATIC) {
            // a static track is not usable until its data has been written
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }

    /**
     * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
     * the AudioTrackRoutingProxy subclass.
     * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
     *   (associated with an OpenSL ES player).
     * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
     * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
     * it means that the OpenSL player interface hasn't been realized, so there is no native
     * AudioTrack to connect to. In this case wait to call deferred_connect() until the
     * OpenSLES interface is realized.
     */
    /*package*/ AudioTrack(long nativeTrackInJavaObj) {
        // proxy tracks carry no meaningful attributes; use the defaults
        super(new AudioAttributes.Builder().build());
        // "final"s
        mNativeTrackInJavaObj = 0;
        mJniData = 0;

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }
        mInitializationLooper = looper;

        // other initialization...
        if (nativeTrackInJavaObj != 0) {
            // connect immediately when the native track already exists
            deferred_connect(nativeTrackInJavaObj);
        } else {
            // native track not realized yet; caller must invoke deferred_connect() later
            mState = STATE_UNINITIALIZED;
        }
    }

    /**
     * Connects this Java object to an already created/initialized native AudioTrack.
     * No-op if this instance is already in the STATE_INITIALIZED state.
     * @hide
     */
    /* package */ void deferred_connect(long nativeTrackInJavaObj) {
        if (mState != STATE_INITIALIZED) {
            // Note that for this native_setup, we are providing an already created/initialized
            // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
            int[] session = { 0 };
            int[] rates = { 0 };
            int initResult = native_setup(new WeakReference<AudioTrack>(this),
                    null /*mAttributes - NA*/,
                    rates /*sampleRate - NA*/,
                    0 /*mChannelMask - NA*/,
                    0 /*mChannelIndexMask - NA*/,
                    0 /*mAudioFormat - NA*/,
                    0 /*mNativeBufferSizeInBytes - NA*/,
                    0 /*mDataLoadMode - NA*/,
                    session,
                    nativeTrackInJavaObj);
            if (initResult != SUCCESS) {
                loge("Error code "+initResult+" when initializing AudioTrack.");
                return; // with mState == STATE_UNINITIALIZED
            }

            // native_setup reports back the session the native track belongs to
            mSessionId = session[0];

            mState = STATE_INITIALIZED;
        }
    }

    /**
     * Builder class for {@link AudioTrack} objects.
     * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
     * attributes and audio format parameters, you indicate which of those vary from the default
     * behavior on the device.
580 * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat} 581 * parameters, to be used by a new <code>AudioTrack</code> instance: 582 * 583 * <pre class="prettyprint"> 584 * AudioTrack player = new AudioTrack.Builder() 585 * .setAudioAttributes(new AudioAttributes.Builder() 586 * .setUsage(AudioAttributes.USAGE_ALARM) 587 * .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC) 588 * .build()) 589 * .setAudioFormat(new AudioFormat.Builder() 590 * .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 591 * .setSampleRate(44100) 592 * .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 593 * .build()) 594 * .setBufferSizeInBytes(minBuffSize) 595 * .build(); 596 * </pre> 597 * <p> 598 * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)}, 599 * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used. 600 * <br>If the audio format is not specified or is incomplete, its sample rate will be the 601 * default output sample rate of the device (see 602 * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be 603 * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be 604 * {@link AudioFormat#ENCODING_PCM_16BIT}. 605 * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)}, 606 * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used. 607 * <br>If the transfer mode is not specified with {@link #setTransferMode(int)}, 608 * <code>MODE_STREAM</code> will be used. 609 * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will 610 * be generated. 611 */ 612 public static class Builder { 613 private AudioAttributes mAttributes; 614 private AudioFormat mFormat; 615 private int mBufferSizeInBytes; 616 private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE; 617 private int mMode = MODE_STREAM; 618 619 /** 620 * Constructs a new Builder with the default values as described above. 
621 */ 622 public Builder() { 623 } 624 625 /** 626 * Sets the {@link AudioAttributes}. 627 * @param attributes a non-null {@link AudioAttributes} instance that describes the audio 628 * data to be played. 629 * @return the same Builder instance. 630 * @throws IllegalArgumentException 631 */ 632 public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes) 633 throws IllegalArgumentException { 634 if (attributes == null) { 635 throw new IllegalArgumentException("Illegal null AudioAttributes argument"); 636 } 637 // keep reference, we only copy the data when building 638 mAttributes = attributes; 639 return this; 640 } 641 642 /** 643 * Sets the format of the audio data to be played by the {@link AudioTrack}. 644 * See {@link AudioFormat.Builder} for configuring the audio format parameters such 645 * as encoding, channel mask and sample rate. 646 * @param format a non-null {@link AudioFormat} instance. 647 * @return the same Builder instance. 648 * @throws IllegalArgumentException 649 */ 650 public @NonNull Builder setAudioFormat(@NonNull AudioFormat format) 651 throws IllegalArgumentException { 652 if (format == null) { 653 throw new IllegalArgumentException("Illegal null AudioFormat argument"); 654 } 655 // keep reference, we only copy the data when building 656 mFormat = format; 657 return this; 658 } 659 660 /** 661 * Sets the total size (in bytes) of the buffer where audio data is read from for playback. 662 * If using the {@link AudioTrack} in streaming mode 663 * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller 664 * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine 665 * the minimum required buffer size for the successful creation of an AudioTrack instance 666 * in streaming mode. Using values smaller than <code>getMinBufferSize()</code> will result 667 * in an exception when trying to build the <code>AudioTrack</code>. 
668 * <br>If using the <code>AudioTrack</code> in static mode (see 669 * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be 670 * played by this instance. 671 * @param bufferSizeInBytes 672 * @return the same Builder instance. 673 * @throws IllegalArgumentException 674 */ 675 public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes) 676 throws IllegalArgumentException { 677 if (bufferSizeInBytes <= 0) { 678 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes); 679 } 680 mBufferSizeInBytes = bufferSizeInBytes; 681 return this; 682 } 683 684 /** 685 * Sets the mode under which buffers of audio data are transferred from the 686 * {@link AudioTrack} to the framework. 687 * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}. 688 * @return the same Builder instance. 689 * @throws IllegalArgumentException 690 */ 691 public @NonNull Builder setTransferMode(@TransferMode int mode) 692 throws IllegalArgumentException { 693 switch(mode) { 694 case MODE_STREAM: 695 case MODE_STATIC: 696 mMode = mode; 697 break; 698 default: 699 throw new IllegalArgumentException("Invalid transfer mode " + mode); 700 } 701 return this; 702 } 703 704 /** 705 * Sets the session ID the {@link AudioTrack} will be attached to. 706 * @param sessionId a strictly positive ID number retrieved from another 707 * <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by 708 * {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or 709 * {@link AudioManager#AUDIO_SESSION_ID_GENERATE}. 710 * @return the same Builder instance. 
711 * @throws IllegalArgumentException 712 */ 713 public @NonNull Builder setSessionId(int sessionId) 714 throws IllegalArgumentException { 715 if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) { 716 throw new IllegalArgumentException("Invalid audio session ID " + sessionId); 717 } 718 mSessionId = sessionId; 719 return this; 720 } 721 722 /** 723 * Builds an {@link AudioTrack} instance initialized with all the parameters set 724 * on this <code>Builder</code>. 725 * @return a new successfully initialized {@link AudioTrack} instance. 726 * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code> 727 * were incompatible, or if they are not supported by the device, 728 * or if the device was not available. 729 */ 730 public @NonNull AudioTrack build() throws UnsupportedOperationException { 731 if (mAttributes == null) { 732 mAttributes = new AudioAttributes.Builder() 733 .setUsage(AudioAttributes.USAGE_MEDIA) 734 .build(); 735 } 736 if (mFormat == null) { 737 mFormat = new AudioFormat.Builder() 738 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 739 //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED) 740 .setEncoding(AudioFormat.ENCODING_DEFAULT) 741 .build(); 742 } 743 try { 744 // If the buffer size is not specified in streaming mode, 745 // use a single frame for the buffer size and let the 746 // native code figure out the minimum buffer size. 
747 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) { 748 mBufferSizeInBytes = mFormat.getChannelCount() 749 * mFormat.getBytesPerSample(mFormat.getEncoding()); 750 } 751 final AudioTrack track = new AudioTrack( 752 mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId); 753 if (track.getState() == STATE_UNINITIALIZED) { 754 // release is not necessary 755 throw new UnsupportedOperationException("Cannot create AudioTrack"); 756 } 757 return track; 758 } catch (IllegalArgumentException e) { 759 throw new UnsupportedOperationException(e.getMessage()); 760 } 761 } 762 } 763 764 // mask of all the positional channels supported, however the allowed combinations 765 // are further restricted by the matching left/right rule and CHANNEL_COUNT_MAX 766 private static final int SUPPORTED_OUT_CHANNELS = 767 AudioFormat.CHANNEL_OUT_FRONT_LEFT | 768 AudioFormat.CHANNEL_OUT_FRONT_RIGHT | 769 AudioFormat.CHANNEL_OUT_FRONT_CENTER | 770 AudioFormat.CHANNEL_OUT_LOW_FREQUENCY | 771 AudioFormat.CHANNEL_OUT_BACK_LEFT | 772 AudioFormat.CHANNEL_OUT_BACK_RIGHT | 773 AudioFormat.CHANNEL_OUT_BACK_CENTER | 774 AudioFormat.CHANNEL_OUT_SIDE_LEFT | 775 AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 776 777 // Convenience method for the constructor's parameter checks. 
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
            int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // SAMPLE_RATE_UNSPECIFIED is explicitly allowed: the effective rate is then
        // chosen later by the sink.
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 is based on stereo. We could coerce it to stereo.
        // But the application needs to know the stream is stereo so that
        // it is encoded and played correctly. So better to just reject it.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO) {
            throw new IllegalArgumentException(
                    "ENCODING_IEC61937 must be configured as CHANNEL_OUT_STEREO");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            // A zero channel count marks "position mask absent"; the index-mask
            // branch below then derives the count from channelIndexMask.
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                // index mask only: it alone determines the channel count
                mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // both masks given: they must agree on the channel count
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // Note on the second clause: once the first clause has ruled out invalid modes,
        // (mode != MODE_STREAM) can only mean MODE_STATIC, which additionally requires
        // a linear PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }

    /**
     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
     * @param channelConfig the mask to validate
     * @return false if the AudioTrack can't be used with such a mask
     */
    private static boolean isMultichannelConfigSupported(int channelConfig) {
        // check for unsupported channels
        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
            loge("Channel configuration features unsupported channels");
            return false;
        }
        final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        if (channelCount > CHANNEL_COUNT_MAX) {
            loge("Channel configuration contains too many channels " +
                    channelCount + ">" + CHANNEL_COUNT_MAX);
            return false;
        }
        // check for unsupported multichannel combinations:
        // - FL/FR must be present
        // - L/R channels must be paired (e.g. no single L channel)
        final int frontPair =
                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        if ((channelConfig & frontPair) != frontPair) {
            loge("Front channels must be present in multichannel configurations");
            return false;
        }
        final int backPair =
                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
        if ((channelConfig & backPair) != 0) {
            if ((channelConfig & backPair) != backPair) {
                loge("Rear channels can't be used independently");
                return false;
            }
        }
        final int sidePair =
                AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
        if ((channelConfig & sidePair) != 0
                && (channelConfig & sidePair) != sidePair) {
            loge("Side channels can't be used independently");
            return false;
        }
        return true;
    }


    // Convenience method for the constructor's audio buffer size check.
    // preconditions:
    //    mChannelCount is valid
    //    mAudioFormat is valid
    // postcondition:
    //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
    private void audioBuffSizeCheck(int audioBufferSize) {
        // NB: this section is only valid with PCM or IEC61937 data.
        //     To update when supporting compressed formats
        int frameSizeInBytes;
        if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
            frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
        } else {
            // compressed data: treat the buffer as raw bytes (frame size 1)
            frameSizeInBytes = 1;
        }
        if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
            throw new IllegalArgumentException("Invalid audio buffer size.");
        }

        mNativeBufferSizeInBytes = audioBufferSize;
        mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
    }


    /**
     * Releases the native AudioTrack resources.
     * After this call the track state is {@link #STATE_UNINITIALIZED}.
     */
    public void release() {
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch (IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        baseRelease();
        native_release();
        mState = STATE_UNINITIALIZED;
    }

    @Override
    protected void finalize() {
        // Safety net in case release() was never called.
        // NOTE(review): does not chain to super.finalize(); confirm PlayerBase
        // has no finalizer work of its own — TODO.
        baseRelease();
        native_finalize();
    }

    //--------------------------------------------------------------------------
    // Getters
    //--------------------
    /**
     * Returns the minimum gain value, which is the constant 0.0.
     * Gain values less than 0.0 will be clamped to 0.0.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @return the minimum value, which is the constant 0.0.
     */
    static public float getMinVolume() {
        return GAIN_MIN;
    }

    /**
     * Returns the maximum gain value, which is greater than or equal to 1.0.
     * Gain values greater than the maximum will be clamped to the maximum.
     * <p>The word "volume" in the API name is historical; this is actually a gain,
     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
     * corresponds to a gain of 0 dB (sample values left unmodified).
     * @return the maximum value, which is greater than or equal to 1.0.
     */
    static public float getMaxVolume() {
        return GAIN_MAX;
    }

    /**
     * Returns the configured audio source sample rate in Hz.
     * The initial source sample rate depends on the constructor parameters,
     * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
     * If the constructor had a specific sample rate, then the initial sink sample rate is that
     * value.
     * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
     * then the initial sink sample rate is a route-dependent default value based on the source [sic].
     * @return the configured source sample rate in Hz.
     */
    public int getSampleRate() {
        return mSampleRate;
    }

    /**
     * Returns the current playback sample rate in Hz.
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }

    /**
     * Returns the current playback parameters.
     * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
     * @return current {@link PlaybackParams}.
     * @throws IllegalStateException if track is not initialized.
     */
    public @NonNull PlaybackParams getPlaybackParams() {
        return native_get_playback_params();
    }

    /**
     * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
     * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }

    /**
     * Returns the type of audio stream this AudioTrack is configured for.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
     */
    public int getStreamType() {
        return mStreamType;
    }

    /**
     * Returns the configured channel position mask.
     * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
     * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
     * This method may return {@link AudioFormat#CHANNEL_INVALID} if
     * a channel index mask was used. Consider
     * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
     * which contains both the channel position mask and the channel index mask.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }

    /**
     * Returns the configured <code>AudioTrack</code> format.
     * @return an {@link AudioFormat} containing the
     * <code>AudioTrack</code> parameters at the time of configuration.
     */
    public @NonNull AudioFormat getFormat() {
        AudioFormat.Builder builder = new AudioFormat.Builder()
                .setSampleRate(mSampleRate)
                .setEncoding(mAudioFormat);
        // Only set the masks that were actually configured; CHANNEL_INVALID marks
        // "no position mask" and 0 marks "no index mask".
        if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
            builder.setChannelMask(mChannelConfiguration);
        }
        if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
            builder.setChannelIndexMask(mChannelIndexMask);
        }
        return builder.build();
    }

    /**
     * Returns the configured number of channels.
     */
    public int getChannelCount() {
        return mChannelCount;
    }

    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_UNINITIALIZED
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     */
    public int getState() {
        return mState;
    }

    /**
     * Returns the playback state of the AudioTrack instance.
     * @see #PLAYSTATE_STOPPED
     * @see #PLAYSTATE_PAUSED
     * @see #PLAYSTATE_PLAYING
     */
    public int getPlayState() {
        // mPlayState is written from multiple threads; read it under its lock.
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }


    /**
     * Returns the effective size of the <code>AudioTrack</code> buffer
     * that the application writes to.
     * <p> This will be less than or equal to the result of
     * {@link #getBufferCapacityInFrames()}.
     * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
     * <p> If the track is subsequently routed to a different output sink, the buffer
     * size and capacity may enlarge to accommodate.
     * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
     * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
     * the size of the native <code>AudioTrack</code> buffer in bytes.
     * <p> See also {@link AudioManager#getProperty(String)} for key
     * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     * @return current size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException if track is not initialized.
     */
    public int getBufferSizeInFrames() {
        return native_get_buffer_size_frames();
    }

    /**
     * Limits the effective size of the <code>AudioTrack</code> buffer
     * that the application writes to.
     * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
     * If a blocking write is used then the write will block until the data
     * can fit within this limit.
     * <p>Changing this limit modifies the latency associated with
     * the buffer for this track. A smaller size will give lower latency
     * but there may be more glitches due to buffer underruns.
     * <p>The actual size used may not be equal to this requested size.
     * It will be limited to a valid range with a maximum of
     * {@link #getBufferCapacityInFrames()}.
     * It may also be adjusted slightly for internal reasons.
     * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
     * will be returned.
     * <p>This method is only supported for PCM audio.
     * It is not supported for compressed audio tracks.
     *
     * @param bufferSizeInFrames requested buffer size
     * @return the actual buffer size in frames or an error code,
     *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
     * @throws IllegalStateException if track is not initialized.
     */
    public int setBufferSizeInFrames(int bufferSizeInFrames) {
        // A static track's buffer size is fixed at construction time.
        if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        if (bufferSizeInFrames < 0) {
            return ERROR_BAD_VALUE;
        }
        // Clamping to the valid range happens in the native layer.
        return native_set_buffer_size_frames(bufferSizeInFrames);
    }

    /**
     * Returns the maximum size of the native <code>AudioTrack</code> buffer.
     * <p> If the track's creation mode is {@link #MODE_STATIC},
     * it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
     * A static track's native frame count will not change.
     * <p> If the track's creation mode is {@link #MODE_STREAM},
     * it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
     * For streaming tracks, this value may be rounded up to a larger value if needed by
     * the target output sink, and
     * if the track is subsequently routed to a different output sink, the native
     * frame count may enlarge to accommodate.
     * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
     * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
     * the size of the native <code>AudioTrack</code> buffer in bytes.
     * <p> See also {@link AudioManager#getProperty(String)} for key
     * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     * @return maximum size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException if track is not initialized.
     */
    public int getBufferCapacityInFrames() {
        return native_get_buffer_capacity_frames();
    }

    /**
     * Returns the frame count of the native <code>AudioTrack</code> buffer.
     * @return current size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException
     * @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
     */
    @Deprecated
    protected int getNativeFrameCount() {
        return native_get_buffer_capacity_frames();
    }

    /**
     * Returns marker position expressed in frames.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }

    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }

    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
     * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
     * the total number of frames played since reset,
     * <i>not</i> the current offset within the buffer.
1213 */ 1214 public int getPlaybackHeadPosition() { 1215 return native_get_position(); 1216 } 1217 1218 /** 1219 * Returns this track's estimated latency in milliseconds. This includes the latency due 1220 * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver. 1221 * 1222 * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need 1223 * a better solution. 1224 * @hide 1225 */ 1226 public int getLatency() { 1227 return native_get_latency(); 1228 } 1229 1230 /** 1231 * Returns the number of underrun occurrences in the application-level write buffer 1232 * since the AudioTrack was created. 1233 * An underrun occurs if the application does not write audio 1234 * data quickly enough, causing the buffer to underflow 1235 * and a potential audio glitch or pop. 1236 * <p> 1237 * Underruns are less likely when buffer sizes are large. 1238 * It may be possible to eliminate underruns by recreating the AudioTrack with 1239 * a larger buffer. 1240 * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the 1241 * effective size of the buffer. 1242 */ 1243 public int getUnderrunCount() { 1244 return native_get_underrun_count(); 1245 } 1246 1247 /** 1248 * Returns the output sample rate in Hz for the specified stream type. 1249 */ 1250 static public int getNativeOutputSampleRate(int streamType) { 1251 return native_get_output_sample_rate(streamType); 1252 } 1253 1254 /** 1255 * Returns the minimum buffer size required for the successful creation of an AudioTrack 1256 * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't 1257 * guarantee a smooth playback under load, and higher values should be chosen according to 1258 * the expected frequency at which the buffer will be refilled with additional data to play. 
1259 * For example, if you intend to dynamically set the source sample rate of an AudioTrack 1260 * to a higher value than the initial source sample rate, be sure to configure the buffer size 1261 * based on the highest planned sample rate. 1262 * @param sampleRateInHz the source sample rate expressed in Hz. 1263 * {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted. 1264 * @param channelConfig describes the configuration of the audio channels. 1265 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 1266 * {@link AudioFormat#CHANNEL_OUT_STEREO} 1267 * @param audioFormat the format in which the audio data is represented. 1268 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 1269 * {@link AudioFormat#ENCODING_PCM_8BIT}, 1270 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 1271 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed, 1272 * or {@link #ERROR} if unable to query for output properties, 1273 * or the minimum buffer size expressed in bytes. 1274 */ 1275 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) { 1276 int channelCount = 0; 1277 switch(channelConfig) { 1278 case AudioFormat.CHANNEL_OUT_MONO: 1279 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1280 channelCount = 1; 1281 break; 1282 case AudioFormat.CHANNEL_OUT_STEREO: 1283 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1284 channelCount = 2; 1285 break; 1286 default: 1287 if (!isMultichannelConfigSupported(channelConfig)) { 1288 loge("getMinBufferSize(): Invalid channel configuration."); 1289 return ERROR_BAD_VALUE; 1290 } else { 1291 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1292 } 1293 } 1294 1295 if (!AudioFormat.isPublicEncoding(audioFormat)) { 1296 loge("getMinBufferSize(): Invalid audio format."); 1297 return ERROR_BAD_VALUE; 1298 } 1299 1300 // sample rate, note these values are subject to change 1301 // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed 1302 if ( (sampleRateInHz < 
AudioFormat.SAMPLE_RATE_HZ_MIN) || 1303 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) { 1304 loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate."); 1305 return ERROR_BAD_VALUE; 1306 } 1307 1308 int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat); 1309 if (size <= 0) { 1310 loge("getMinBufferSize(): error querying hardware"); 1311 return ERROR; 1312 } 1313 else { 1314 return size; 1315 } 1316 } 1317 1318 /** 1319 * Returns the audio session ID. 1320 * 1321 * @return the ID of the audio session this AudioTrack belongs to. 1322 */ 1323 public int getAudioSessionId() { 1324 return mSessionId; 1325 } 1326 1327 /** 1328 * Poll for a timestamp on demand. 1329 * <p> 1330 * If you need to track timestamps during initial warmup or after a routing or mode change, 1331 * you should request a new timestamp periodically until the reported timestamps 1332 * show that the frame position is advancing, or until it becomes clear that 1333 * timestamps are unavailable for this route. 1334 * <p> 1335 * After the clock is advancing at a stable rate, 1336 * query for a new timestamp approximately once every 10 seconds to once per minute. 1337 * Calling this method more often is inefficient. 1338 * It is also counter-productive to call this method more often than recommended, 1339 * because the short-term differences between successive timestamp reports are not meaningful. 1340 * If you need a high-resolution mapping between frame position and presentation time, 1341 * consider implementing that at application level, based on low-resolution timestamps. 1342 * <p> 1343 * The audio data at the returned position may either already have been 1344 * presented, or may have not yet been presented but is committed to be presented. 1345 * It is not possible to request the time corresponding to a particular position, 1346 * or to request the (fractional) position corresponding to a particular time. 
1347 * If you need such features, consider implementing them at application level. 1348 * 1349 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1350 * and owned by caller. 1351 * @return true if a timestamp is available, or false if no timestamp is available. 1352 * If a timestamp if available, 1353 * the AudioTimestamp instance is filled in with a position in frame units, together 1354 * with the estimated time when that frame was presented or is committed to 1355 * be presented. 1356 * In the case that no timestamp is available, any supplied instance is left unaltered. 1357 * A timestamp may be temporarily unavailable while the audio clock is stabilizing, 1358 * or during and immediately after a route change. 1359 * A timestamp is permanently unavailable for a given route if the route does not support 1360 * timestamps. In this case, the approximate frame position can be obtained 1361 * using {@link #getPlaybackHeadPosition}. 1362 * However, it may be useful to continue to query for 1363 * timestamps occasionally, to recover after a route change. 1364 */ 1365 // Add this text when the "on new timestamp" API is added: 1366 // Use if you need to get the most recent timestamp outside of the event callback handler. 1367 public boolean getTimestamp(AudioTimestamp timestamp) 1368 { 1369 if (timestamp == null) { 1370 throw new IllegalArgumentException(); 1371 } 1372 // It's unfortunate, but we have to either create garbage every time or use synchronized 1373 long[] longArray = new long[2]; 1374 int ret = native_get_timestamp(longArray); 1375 if (ret != SUCCESS) { 1376 return false; 1377 } 1378 timestamp.framePosition = longArray[0]; 1379 timestamp.nanoTime = longArray[1]; 1380 return true; 1381 } 1382 1383 /** 1384 * Poll for a timestamp on demand. 1385 * <p> 1386 * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code. 
1387 * 1388 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1389 * and owned by caller. 1390 * @return {@link #SUCCESS} if a timestamp is available 1391 * {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called 1392 * immediately after start/ACTIVE, when the number of frames consumed is less than the 1393 * overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll 1394 * again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time 1395 * for the timestamp. 1396 * {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1397 * needs to be recreated. 1398 * {@link #ERROR_INVALID_OPERATION} if current route does not support 1399 * timestamps. In this case, the approximate frame position can be obtained 1400 * using {@link #getPlaybackHeadPosition}. 1401 * 1402 * The AudioTimestamp instance is filled in with a position in frame units, together 1403 * with the estimated time when that frame was presented or is committed to 1404 * be presented. 1405 * @hide 1406 */ 1407 // Add this text when the "on new timestamp" API is added: 1408 // Use if you need to get the most recent timestamp outside of the event callback handler. 1409 public int getTimestampWithStatus(AudioTimestamp timestamp) 1410 { 1411 if (timestamp == null) { 1412 throw new IllegalArgumentException(); 1413 } 1414 // It's unfortunate, but we have to either create garbage every time or use synchronized 1415 long[] longArray = new long[2]; 1416 int ret = native_get_timestamp(longArray); 1417 timestamp.framePosition = longArray[0]; 1418 timestamp.nanoTime = longArray[1]; 1419 return ret; 1420 } 1421 1422 //-------------------------------------------------------------------------- 1423 // Initialization / configuration 1424 //-------------------- 1425 /** 1426 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1427 * for each periodic playback head position update. 
1428 * Notifications will be received in the same thread as the one in which the AudioTrack 1429 * instance was created. 1430 * @param listener 1431 */ 1432 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) { 1433 setPlaybackPositionUpdateListener(listener, null); 1434 } 1435 1436 /** 1437 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1438 * for each periodic playback head position update. 1439 * Use this method to receive AudioTrack events in the Handler associated with another 1440 * thread than the one in which you created the AudioTrack instance. 1441 * @param listener 1442 * @param handler the Handler that will receive the event notification messages. 1443 */ 1444 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, 1445 Handler handler) { 1446 if (listener != null) { 1447 mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler); 1448 } else { 1449 mEventHandlerDelegate = null; 1450 } 1451 } 1452 1453 1454 private static float clampGainOrLevel(float gainOrLevel) { 1455 if (Float.isNaN(gainOrLevel)) { 1456 throw new IllegalArgumentException(); 1457 } 1458 if (gainOrLevel < GAIN_MIN) { 1459 gainOrLevel = GAIN_MIN; 1460 } else if (gainOrLevel > GAIN_MAX) { 1461 gainOrLevel = GAIN_MAX; 1462 } 1463 return gainOrLevel; 1464 } 1465 1466 1467 /** 1468 * Sets the specified left and right output gain values on the AudioTrack. 1469 * <p>Gain values are clamped to the closed interval [0.0, max] where 1470 * max is the value of {@link #getMaxVolume}. 1471 * A value of 0.0 results in zero gain (silence), and 1472 * a value of 1.0 means unity gain (signal unchanged). 1473 * The default value is 1.0 meaning unity gain. 1474 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1475 * @param leftGain output gain for the left channel. 
     * @param rightGain output gain for the right channel
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     * @deprecated Applications should use {@link #setVolume} instead, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     */
    @Deprecated
    public int setStereoVolume(float leftGain, float rightGain) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }

        // Delegates to PlayerBase, which eventually calls playerSetVolume() below.
        baseSetVolume(leftGain, rightGain);
        return SUCCESS;
    }

    @Override
    void playerSetVolume(float leftVolume, float rightVolume) {
        // Clamp into [GAIN_MIN, GAIN_MAX] before handing off to the native layer.
        leftVolume = clampGainOrLevel(leftVolume);
        rightVolume = clampGainOrLevel(rightVolume);

        native_setVolume(leftVolume, rightVolume);
    }


    /**
     * Sets the specified output gain value on all channels of this track.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>This API is preferred over {@link #setStereoVolume}, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param gain output gain for all channels.
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setVolume(float gain) {
        return setStereoVolume(gain, gain);
    }


    /**
     * Sets the playback sample rate for this track. This sets the sampling rate at which
     * the audio data will be consumed and played back
     * (as set by the sampleRateInHz parameter in the
     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
     * not the original sampling rate of the
     * content. For example, setting it to half the sample rate of the content will cause the
     * playback to last twice as long, but will also result in a pitch shift down by one octave.
     * The valid sample rate range is from 1 Hz to twice the value returned by
     * {@link #getNativeOutputSampleRate(int)}.
     * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
     * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
     * for playback of content of differing sample rate,
     * but with identical encoding and channel mask.
     * @param sampleRateInHz the sample rate expressed in Hz
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setPlaybackRate(int sampleRateInHz) {
        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        if (sampleRateInHz <= 0) {
            return ERROR_BAD_VALUE;
        }
        // The upper bound (twice the native output rate) is enforced natively.
        return native_set_playback_rate(sampleRateInHz);
    }


    /**
     * Sets the playback parameters.
     * This method returns failure if it cannot apply the playback parameters.
     * One possible cause is that the parameters for speed or pitch are out of range.
     * Another possible cause is that the <code>AudioTrack</code> is streaming
     * (see {@link #MODE_STREAM}) and the
     * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
     * on configuration must be larger than the speed multiplied by the minimum size
     * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
     * @param params see {@link PlaybackParams}. In particular,
     * speed, pitch, and audio mode should be set.
     * @throws IllegalArgumentException if the parameters are invalid or not accepted.
     * @throws IllegalStateException if track is not initialized.
     */
    public void setPlaybackParams(@NonNull PlaybackParams params) {
        // Defensive runtime check: @NonNull is not enforced at runtime.
        if (params == null) {
            throw new IllegalArgumentException("params is null");
        }
        native_set_playback_params(params);
    }


    /**
     * Sets the position of the notification marker.  At most one marker can be active.
     * @param markerInFrames marker position in wrapping frame units similar to
     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
     * To set a marker at a position which would appear as zero due to wraparound,
     * a workaround is to use a non-zero position near zero, such as -1 or 1.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *  {@link #ERROR_INVALID_OPERATION}
     */
    public int setNotificationMarkerPosition(int markerInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_set_marker_pos(markerInFrames);
    }


    /**
     * Sets the period for the periodic notification event.
     * @param periodInFrames update period expressed in frames.
     * Zero period means no position updates.  A negative period is not allowed.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
     */
    public int setPositionNotificationPeriod(int periodInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_set_pos_update_period(periodInFrames);
    }


    /**
     * Sets the playback head position within the static buffer.
     * The track must be stopped or paused for the position to be changed,
     * and must use the {@link #MODE_STATIC} mode.
     * @param positionInFrames playback head position within buffer, expressed in frames.
     * Zero corresponds to start of buffer.
     * The position must not be greater than the buffer size in frames, or negative.
     * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
     * the position values have different meanings.
     * <br>
     * If looping is currently enabled and the new position is greater than or equal to the
     * loop end marker, the behavior varies by API level:
     * as of {@link android.os.Build.VERSION_CODES#M},
     * the looping is first disabled and then the position is set.
     * For earlier API levels, the behavior is unspecified.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setPlaybackHeadPosition(int positionInFrames) {
        // Only valid for a static track that is not currently playing.
        if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
                getPlayState() == PLAYSTATE_PLAYING) {
            return ERROR_INVALID_OPERATION;
        }
        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
            return ERROR_BAD_VALUE;
        }
        return native_set_position(positionInFrames);
    }

    /**
     * Sets the loop points and the loop count. The loop can be infinite.
     * Similarly to setPlaybackHeadPosition,
     * the track must be stopped or paused for the loop points to be changed,
     * and must use the {@link #MODE_STATIC} mode.
     * @param startInFrames loop start marker expressed in frames.
     * Zero corresponds to start of buffer.
     * The start marker must not be greater than or equal to the buffer size in frames, or negative.
     * @param endInFrames loop end marker expressed in frames.
     * The total buffer size in frames corresponds to end of buffer.
     * The end marker must not be greater than the buffer size in frames.
1642 * For looping, the end marker must not be less than or equal to the start marker, 1643 * but to disable looping 1644 * it is permitted for start marker, end marker, and loop count to all be 0. 1645 * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}. 1646 * If the loop period (endInFrames - startInFrames) is too small for the implementation to 1647 * support, 1648 * {@link #ERROR_BAD_VALUE} is returned. 1649 * The loop range is the interval [startInFrames, endInFrames). 1650 * <br> 1651 * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged, 1652 * unless it is greater than or equal to the loop end marker, in which case 1653 * it is forced to the loop start marker. 1654 * For earlier API levels, the effect on position is unspecified. 1655 * @param loopCount the number of times the loop is looped; must be greater than or equal to -1. 1656 * A value of -1 means infinite looping, and 0 disables looping. 1657 * A value of positive N means to "loop" (go back) N times. For example, 1658 * a value of one means to play the region two times in total. 1659 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1660 * {@link #ERROR_INVALID_OPERATION} 1661 */ 1662 public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) { 1663 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 1664 getPlayState() == PLAYSTATE_PLAYING) { 1665 return ERROR_INVALID_OPERATION; 1666 } 1667 if (loopCount == 0) { 1668 ; // explicitly allowed as an exception to the loop region range check 1669 } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames && 1670 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) { 1671 return ERROR_BAD_VALUE; 1672 } 1673 return native_set_loop(startInFrames, endInFrames, loopCount); 1674 } 1675 1676 /** 1677 * Sets the initialization state of the instance. 
This method was originally intended to be used
     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
     * @param state the state of the AudioTrack instance
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     */
    @Deprecated
    protected void setState(int state) {
        // Unvalidated direct assignment; historically let subclasses mark themselves
        // initialized after custom constructor work.
        mState = state;
    }


    //---------------------------------------------------------
    // Transport control methods
    //--------------------
    /**
     * Starts playing an AudioTrack.
     * <p>
     * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
     * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
     * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
     * play().
     * <p>
     * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
     * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
     * If you don't call write() first, or if you call write() but with an insufficient amount of
     * data, then the track will be in underrun state at play().  In this case,
     * playback will not actually start playing until the data path is filled to a
     * device-specific minimum level.  This requirement for the path to be filled
     * to a minimum level is also true when resuming audio playback after calling stop().
     * Similarly the buffer will need to be filled up again after
     * the track underruns due to failure to call write() in a timely manner with sufficient data.
1710 * For portability, an application should prime the data path to the maximum allowed 1711 * by writing data until the write() method returns a short transfer count. 1712 * This allows play() to start immediately, and reduces the chance of underrun. 1713 * 1714 * @throws IllegalStateException if the track isn't properly initialized 1715 */ 1716 public void play() 1717 throws IllegalStateException { 1718 if (mState != STATE_INITIALIZED) { 1719 throw new IllegalStateException("play() called on uninitialized AudioTrack."); 1720 } 1721 baseStart(); 1722 synchronized(mPlayStateLock) { 1723 native_start(); 1724 mPlayState = PLAYSTATE_PLAYING; 1725 } 1726 } 1727 1728 /** 1729 * Stops playing the audio data. 1730 * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing 1731 * after the last buffer that was written has been played. For an immediate stop, use 1732 * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played 1733 * back yet. 1734 * @throws IllegalStateException 1735 */ 1736 public void stop() 1737 throws IllegalStateException { 1738 if (mState != STATE_INITIALIZED) { 1739 throw new IllegalStateException("stop() called on uninitialized AudioTrack."); 1740 } 1741 1742 // stop playing 1743 synchronized(mPlayStateLock) { 1744 native_stop(); 1745 mPlayState = PLAYSTATE_STOPPED; 1746 mAvSyncHeader = null; 1747 mAvSyncBytesRemaining = 0; 1748 } 1749 } 1750 1751 /** 1752 * Pauses the playback of the audio data. Data that has not been played 1753 * back will not be discarded. Subsequent calls to {@link #play} will play 1754 * this data back. See {@link #flush()} to discard this data. 
1755 * 1756 * @throws IllegalStateException 1757 */ 1758 public void pause() 1759 throws IllegalStateException { 1760 if (mState != STATE_INITIALIZED) { 1761 throw new IllegalStateException("pause() called on uninitialized AudioTrack."); 1762 } 1763 //logd("pause()"); 1764 1765 // pause playback 1766 synchronized(mPlayStateLock) { 1767 native_pause(); 1768 mPlayState = PLAYSTATE_PAUSED; 1769 } 1770 } 1771 1772 1773 //--------------------------------------------------------- 1774 // Audio data supply 1775 //-------------------- 1776 1777 /** 1778 * Flushes the audio data currently queued for playback. Any data that has 1779 * been written but not yet presented will be discarded. No-op if not stopped or paused, 1780 * or if the track's creation mode is not {@link #MODE_STREAM}. 1781 * <BR> Note that although data written but not yet presented is discarded, there is no 1782 * guarantee that all of the buffer space formerly used by that data 1783 * is available for a subsequent write. 1784 * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code> 1785 * less than or equal to the total buffer size 1786 * may return a short actual transfer count. 1787 */ 1788 public void flush() { 1789 if (mState == STATE_INITIALIZED) { 1790 // flush the data in native layer 1791 native_flush(); 1792 mAvSyncHeader = null; 1793 mAvSyncBytesRemaining = 0; 1794 } 1795 1796 } 1797 1798 /** 1799 * Writes the audio data to the audio sink for playback (streaming mode), 1800 * or copies audio data for later playback (static buffer mode). 1801 * The format specified in the AudioTrack constructor should be 1802 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 1803 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 1804 * <p> 1805 * In streaming mode, the write will normally block until all the data has been enqueued for 1806 * playback, and will return a full transfer count. 
However, if the track is stopped or paused 1807 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1808 * occurs during the write, then the write may return a short transfer count. 1809 * <p> 1810 * In static buffer mode, copies the data to the buffer starting at offset 0. 1811 * Note that the actual playback of this data might occur after this function returns. 1812 * 1813 * @param audioData the array that holds the data to play. 1814 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 1815 * starts. 1816 * Must not be negative, or cause the data access to go out of bounds of the array. 1817 * @param sizeInBytes the number of bytes to write in audioData after the offset. 1818 * Must not be negative, or cause the data access to go out of bounds of the array. 1819 * @return zero or the positive number of bytes that were written, or 1820 * {@link #ERROR_INVALID_OPERATION} 1821 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1822 * the parameters don't resolve to valid data and indexes, or 1823 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1824 * needs to be recreated. 1825 * The dead object error code is not returned if some data was successfully transferred. 1826 * In this case, the error is returned at the next write(). 1827 * The number of bytes will be a multiple of the frame size in bytes 1828 * not to exceed sizeInBytes. 1829 * 1830 * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code> 1831 * set to {@link #WRITE_BLOCKING}. 1832 */ 1833 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) { 1834 return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING); 1835 } 1836 1837 /** 1838 * Writes the audio data to the audio sink for playback (streaming mode), 1839 * or copies audio data for later playback (static buffer mode). 
1840 * The format specified in the AudioTrack constructor should be 1841 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 1842 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 1843 * <p> 1844 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 1845 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1846 * for playback, and will return a full transfer count. However, if the write mode is 1847 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1848 * interrupts the write by calling stop or pause, or an I/O error 1849 * occurs during the write, then the write may return a short transfer count. 1850 * <p> 1851 * In static buffer mode, copies the data to the buffer starting at offset 0, 1852 * and the write mode is ignored. 1853 * Note that the actual playback of this data might occur after this function returns. 1854 * 1855 * @param audioData the array that holds the data to play. 1856 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 1857 * starts. 1858 * Must not be negative, or cause the data access to go out of bounds of the array. 1859 * @param sizeInBytes the number of bytes to write in audioData after the offset. 1860 * Must not be negative, or cause the data access to go out of bounds of the array. 1861 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1862 * effect in static mode. 1863 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1864 * to the audio sink. 1865 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1866 * queuing as much audio data for playback as possible without blocking. 
1867 * @return zero or the positive number of bytes that were written, or 1868 * {@link #ERROR_INVALID_OPERATION} 1869 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1870 * the parameters don't resolve to valid data and indexes, or 1871 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1872 * needs to be recreated. 1873 * The dead object error code is not returned if some data was successfully transferred. 1874 * In this case, the error is returned at the next write(). 1875 * The number of bytes will be a multiple of the frame size in bytes 1876 * not to exceed sizeInBytes. 1877 */ 1878 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes, 1879 @WriteMode int writeMode) { 1880 1881 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1882 return ERROR_INVALID_OPERATION; 1883 } 1884 1885 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1886 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1887 return ERROR_BAD_VALUE; 1888 } 1889 1890 if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0) 1891 || (offsetInBytes + sizeInBytes < 0) // detect integer overflow 1892 || (offsetInBytes + sizeInBytes > audioData.length)) { 1893 return ERROR_BAD_VALUE; 1894 } 1895 1896 int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat, 1897 writeMode == WRITE_BLOCKING); 1898 1899 if ((mDataLoadMode == MODE_STATIC) 1900 && (mState == STATE_NO_STATIC_DATA) 1901 && (ret > 0)) { 1902 // benign race with respect to other APIs that read mState 1903 mState = STATE_INITIALIZED; 1904 } 1905 1906 return ret; 1907 } 1908 1909 /** 1910 * Writes the audio data to the audio sink for playback (streaming mode), 1911 * or copies audio data for later playback (static buffer mode). 
1912 * The format specified in the AudioTrack constructor should be 1913 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1914 * <p> 1915 * In streaming mode, the write will normally block until all the data has been enqueued for 1916 * playback, and will return a full transfer count. However, if the track is stopped or paused 1917 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1918 * occurs during the write, then the write may return a short transfer count. 1919 * <p> 1920 * In static buffer mode, copies the data to the buffer starting at offset 0. 1921 * Note that the actual playback of this data might occur after this function returns. 1922 * 1923 * @param audioData the array that holds the data to play. 1924 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 1925 * starts. 1926 * Must not be negative, or cause the data access to go out of bounds of the array. 1927 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1928 * Must not be negative, or cause the data access to go out of bounds of the array. 1929 * @return zero or the positive number of shorts that were written, or 1930 * {@link #ERROR_INVALID_OPERATION} 1931 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1932 * the parameters don't resolve to valid data and indexes, or 1933 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1934 * needs to be recreated. 1935 * The dead object error code is not returned if some data was successfully transferred. 1936 * In this case, the error is returned at the next write(). 1937 * The number of shorts will be a multiple of the channel count not to exceed sizeInShorts. 1938 * 1939 * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code> 1940 * set to {@link #WRITE_BLOCKING}. 
1941 */ 1942 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) { 1943 return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING); 1944 } 1945 1946 /** 1947 * Writes the audio data to the audio sink for playback (streaming mode), 1948 * or copies audio data for later playback (static buffer mode). 1949 * The format specified in the AudioTrack constructor should be 1950 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1951 * <p> 1952 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 1953 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1954 * for playback, and will return a full transfer count. However, if the write mode is 1955 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1956 * interrupts the write by calling stop or pause, or an I/O error 1957 * occurs during the write, then the write may return a short transfer count. 1958 * <p> 1959 * In static buffer mode, copies the data to the buffer starting at offset 0. 1960 * Note that the actual playback of this data might occur after this function returns. 1961 * 1962 * @param audioData the array that holds the data to write. 1963 * @param offsetInShorts the offset expressed in shorts in audioData where the data to write 1964 * starts. 1965 * Must not be negative, or cause the data access to go out of bounds of the array. 1966 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1967 * Must not be negative, or cause the data access to go out of bounds of the array. 1968 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1969 * effect in static mode. 1970 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1971 * to the audio sink. 
1972 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1973 * queuing as much audio data for playback as possible without blocking. 1974 * @return zero or the positive number of shorts that were written, or 1975 * {@link #ERROR_INVALID_OPERATION} 1976 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1977 * the parameters don't resolve to valid data and indexes, or 1978 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1979 * needs to be recreated. 1980 * The dead object error code is not returned if some data was successfully transferred. 1981 * In this case, the error is returned at the next write(). 1982 * The number of shorts will be a multiple of the channel count not to exceed sizeInShorts. 1983 */ 1984 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts, 1985 @WriteMode int writeMode) { 1986 1987 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1988 return ERROR_INVALID_OPERATION; 1989 } 1990 1991 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1992 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1993 return ERROR_BAD_VALUE; 1994 } 1995 1996 if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0) 1997 || (offsetInShorts + sizeInShorts < 0) // detect integer overflow 1998 || (offsetInShorts + sizeInShorts > audioData.length)) { 1999 return ERROR_BAD_VALUE; 2000 } 2001 2002 int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat, 2003 writeMode == WRITE_BLOCKING); 2004 2005 if ((mDataLoadMode == MODE_STATIC) 2006 && (mState == STATE_NO_STATIC_DATA) 2007 && (ret > 0)) { 2008 // benign race with respect to other APIs that read mState 2009 mState = STATE_INITIALIZED; 2010 } 2011 2012 return ret; 2013 } 2014 2015 /** 2016 * Writes the audio data to the audio sink for playback (streaming mode), 2017 * or copies audio data 
for later playback (static buffer mode). 2018 * The format specified in the AudioTrack constructor should be 2019 * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array. 2020 * <p> 2021 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2022 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2023 * for playback, and will return a full transfer count. However, if the write mode is 2024 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2025 * interrupts the write by calling stop or pause, or an I/O error 2026 * occurs during the write, then the write may return a short transfer count. 2027 * <p> 2028 * In static buffer mode, copies the data to the buffer starting at offset 0, 2029 * and the write mode is ignored. 2030 * Note that the actual playback of this data might occur after this function returns. 2031 * 2032 * @param audioData the array that holds the data to write. 2033 * The implementation does not clip for sample values within the nominal range 2034 * [-1.0f, 1.0f], provided that all gains in the audio pipeline are 2035 * less than or equal to unity (1.0f), and in the absence of post-processing effects 2036 * that could add energy, such as reverb. For the convenience of applications 2037 * that compute samples using filters with non-unity gain, 2038 * sample values +3 dB beyond the nominal range are permitted. 2039 * However such values may eventually be limited or clipped, depending on various gains 2040 * and later processing in the audio path. Therefore applications are encouraged 2041 * to provide samples values within the nominal range. 2042 * @param offsetInFloats the offset, expressed as a number of floats, 2043 * in audioData where the data to write starts. 2044 * Must not be negative, or cause the data access to go out of bounds of the array. 
2045 * @param sizeInFloats the number of floats to write in audioData after the offset. 2046 * Must not be negative, or cause the data access to go out of bounds of the array. 2047 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2048 * effect in static mode. 2049 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2050 * to the audio sink. 2051 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2052 * queuing as much audio data for playback as possible without blocking. 2053 * @return zero or the positive number of floats that were written, or 2054 * {@link #ERROR_INVALID_OPERATION} 2055 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 2056 * the parameters don't resolve to valid data and indexes, or 2057 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2058 * needs to be recreated. 2059 * The dead object error code is not returned if some data was successfully transferred. 2060 * In this case, the error is returned at the next write(). 2061 * The number of floats will be a multiple of the channel count not to exceed sizeInFloats. 2062 */ 2063 public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats, 2064 @WriteMode int writeMode) { 2065 2066 if (mState == STATE_UNINITIALIZED) { 2067 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2068 return ERROR_INVALID_OPERATION; 2069 } 2070 2071 if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) { 2072 Log.e(TAG, "AudioTrack.write(float[] ...) 
requires format ENCODING_PCM_FLOAT"); 2073 return ERROR_INVALID_OPERATION; 2074 } 2075 2076 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2077 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2078 return ERROR_BAD_VALUE; 2079 } 2080 2081 if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0) 2082 || (offsetInFloats + sizeInFloats < 0) // detect integer overflow 2083 || (offsetInFloats + sizeInFloats > audioData.length)) { 2084 Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size"); 2085 return ERROR_BAD_VALUE; 2086 } 2087 2088 int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat, 2089 writeMode == WRITE_BLOCKING); 2090 2091 if ((mDataLoadMode == MODE_STATIC) 2092 && (mState == STATE_NO_STATIC_DATA) 2093 && (ret > 0)) { 2094 // benign race with respect to other APIs that read mState 2095 mState = STATE_INITIALIZED; 2096 } 2097 2098 return ret; 2099 } 2100 2101 2102 /** 2103 * Writes the audio data to the audio sink for playback (streaming mode), 2104 * or copies audio data for later playback (static buffer mode). 2105 * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor. 2106 * <p> 2107 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2108 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2109 * for playback, and will return a full transfer count. However, if the write mode is 2110 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2111 * interrupts the write by calling stop or pause, or an I/O error 2112 * occurs during the write, then the write may return a short transfer count. 2113 * <p> 2114 * In static buffer mode, copies the data to the buffer starting at offset 0, 2115 * and the write mode is ignored. 
2116 * Note that the actual playback of this data might occur after this function returns. 2117 * 2118 * @param audioData the buffer that holds the data to write, starting at the position reported 2119 * by <code>audioData.position()</code>. 2120 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 2121 * have been advanced to reflect the amount of data that was successfully written to 2122 * the AudioTrack. 2123 * @param sizeInBytes number of bytes to write. It is recommended but not enforced 2124 * that the number of bytes requested be a multiple of the frame size (sample size in 2125 * bytes multiplied by the channel count). 2126 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 2127 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2128 * effect in static mode. 2129 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2130 * to the audio sink. 2131 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2132 * queuing as much audio data for playback as possible without blocking. 2133 * @return zero or the positive number of bytes that were written, or 2134 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or 2135 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2136 * needs to be recreated. 2137 * The dead object error code is not returned if some data was successfully transferred. 2138 * In this case, the error is returned at the next write(). 
2139 */ 2140 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 2141 @WriteMode int writeMode) { 2142 2143 if (mState == STATE_UNINITIALIZED) { 2144 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2145 return ERROR_INVALID_OPERATION; 2146 } 2147 2148 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2149 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2150 return ERROR_BAD_VALUE; 2151 } 2152 2153 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 2154 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 2155 return ERROR_BAD_VALUE; 2156 } 2157 2158 int ret = 0; 2159 if (audioData.isDirect()) { 2160 ret = native_write_native_bytes(audioData, 2161 audioData.position(), sizeInBytes, mAudioFormat, 2162 writeMode == WRITE_BLOCKING); 2163 } else { 2164 ret = native_write_byte(NioUtils.unsafeArray(audioData), 2165 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 2166 sizeInBytes, mAudioFormat, 2167 writeMode == WRITE_BLOCKING); 2168 } 2169 2170 if ((mDataLoadMode == MODE_STATIC) 2171 && (mState == STATE_NO_STATIC_DATA) 2172 && (ret > 0)) { 2173 // benign race with respect to other APIs that read mState 2174 mState = STATE_INITIALIZED; 2175 } 2176 2177 if (ret > 0) { 2178 audioData.position(audioData.position() + ret); 2179 } 2180 2181 return ret; 2182 } 2183 2184 /** 2185 * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track. 2186 * The blocking behavior will depend on the write mode. 2187 * @param audioData the buffer that holds the data to write, starting at the position reported 2188 * by <code>audioData.position()</code>. 2189 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 2190 * have been advanced to reflect the amount of data that was successfully written to 2191 * the AudioTrack. 
     * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
     *     that the number of bytes requested be a multiple of the frame size (sample size in
     *     bytes multiplied by the channel count).
     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *     to the audio sink.
     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
     * @return zero or a positive number of bytes that were written, or
     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
     *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *     needs to be recreated.
     *     The dead object error code is not returned if some data was successfully transferred.
     *     In this case, the error is returned at the next write().
     */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes are only meaningful for streaming tracks.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Without FLAG_HW_AV_SYNC the sink cannot consume the timestamp; fall back to a
        // plain write and drop the pts.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists
        // Header layout (big-endian): int tag 0x55550001, int payload size, long timestamp.
        // mAvSyncHeader / mAvSyncBytesRemaining persist across calls until the announced
        // payload has been fully written (or stop()/flush() resets them), so later calls
        // with a new timestamp are deliberately ignored until then.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(16);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550001);   // fixed tag identifying an AV sync header
            mAvSyncHeader.putInt(sizeInBytes);  // payload size announced to the sink
            mAvSyncHeader.putLong(timestamp);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Header only partially consumed (possible in non-blocking mode); report
                // zero audio bytes written and finish the header on the next call.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more than the payload size announced in the current header.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;
        if (mAvSyncBytesRemaining == 0) {
            // Payload complete; the next call starts a fresh header.
            mAvSyncHeader = null;
        }

        return ret;
    }


    /**
     * Sets the playback head position within the static buffer to zero,
     * that is it rewinds to start of static buffer.
     * The track must be stopped or paused, and
     * the track's creation mode must be {@link #MODE_STATIC}.
     * <p>
     * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
     * {@link #getPlaybackHeadPosition()} to zero.
     * For earlier API levels, the reset behavior is unspecified.
     * <p>
     * Use {@link #setPlaybackHeadPosition(int)} with a zero position
     * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *  {@link #ERROR_INVALID_OPERATION}
     */
    public int reloadStaticData() {
        // Requires a fully initialized MODE_STATIC track; the play-state precondition
        // (stopped or paused) is checked by the native layer.
        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_reload_static();
    }

    //--------------------------------------------------------------------------
    // Audio effects management
    //--------------------

    /**
     * Attaches an auxiliary effect to the audio track. A typical auxiliary
     * effect is a reverberation effect which can be applied on any sound source
     * that directs a certain amount of its energy to this effect. This amount
     * is defined by setAuxEffectSendLevel().
     * {@see #setAuxEffectSendLevel(float)}.
2315 * <p>After creating an auxiliary effect (e.g. 2316 * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with 2317 * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling 2318 * this method to attach the audio track to the effect. 2319 * <p>To detach the effect from the audio track, call this method with a 2320 * null effect id. 2321 * 2322 * @param effectId system wide unique id of the effect to attach 2323 * @return error code or success, see {@link #SUCCESS}, 2324 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE} 2325 */ 2326 public int attachAuxEffect(int effectId) { 2327 if (mState == STATE_UNINITIALIZED) { 2328 return ERROR_INVALID_OPERATION; 2329 } 2330 return native_attachAuxEffect(effectId); 2331 } 2332 2333 /** 2334 * Sets the send level of the audio track to the attached auxiliary effect 2335 * {@link #attachAuxEffect(int)}. Effect levels 2336 * are clamped to the closed interval [0.0, max] where 2337 * max is the value of {@link #getMaxVolume}. 2338 * A value of 0.0 results in no effect, and a value of 1.0 is full send. 2339 * <p>By default the send level is 0.0f, so even if an effect is attached to the player 2340 * this method must be called for the effect to be applied. 2341 * <p>Note that the passed level value is a linear scalar. 
UI controls should be scaled 2342 * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB, 2343 * so an appropriate conversion from linear UI input x to level is: 2344 * x == 0 -> level = 0 2345 * 0 < x <= R -> level = 10^(72*(x-R)/20/R) 2346 * 2347 * @param level linear send level 2348 * @return error code or success, see {@link #SUCCESS}, 2349 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR} 2350 */ 2351 public int setAuxEffectSendLevel(float level) { 2352 if (mState == STATE_UNINITIALIZED) { 2353 return ERROR_INVALID_OPERATION; 2354 } 2355 return baseSetAuxEffectSendLevel(level); 2356 } 2357 2358 @Override 2359 int playerSetAuxEffectSendLevel(float level) { 2360 level = clampGainOrLevel(level); 2361 int err = native_setAuxEffectSendLevel(level); 2362 return err == 0 ? SUCCESS : ERROR; 2363 } 2364 2365 //-------------------------------------------------------------------------- 2366 // Explicit Routing 2367 //-------------------- 2368 private AudioDeviceInfo mPreferredDevice = null; 2369 2370 /** 2371 * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route 2372 * the output from this AudioTrack. 2373 * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink. 2374 * If deviceInfo is null, default routing is restored. 2375 * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and 2376 * does not correspond to a valid audio output device. 2377 */ 2378 @Override 2379 public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) { 2380 // Do some validation.... 2381 if (deviceInfo != null && !deviceInfo.isSink()) { 2382 return false; 2383 } 2384 int preferredDeviceId = deviceInfo != null ? 
deviceInfo.getId() : 0; 2385 boolean status = native_setOutputDevice(preferredDeviceId); 2386 if (status == true) { 2387 synchronized (this) { 2388 mPreferredDevice = deviceInfo; 2389 } 2390 } 2391 return status; 2392 } 2393 2394 /** 2395 * Returns the selected output specified by {@link #setPreferredDevice}. Note that this 2396 * is not guaranteed to correspond to the actual device being used for playback. 2397 */ 2398 @Override 2399 public AudioDeviceInfo getPreferredDevice() { 2400 synchronized (this) { 2401 return mPreferredDevice; 2402 } 2403 } 2404 2405 /** 2406 * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack. 2407 * Note: The query is only valid if the AudioTrack is currently playing. If it is not, 2408 * <code>getRoutedDevice()</code> will return null. 2409 */ 2410 @Override 2411 public AudioDeviceInfo getRoutedDevice() { 2412 int deviceId = native_getRoutedDeviceId(); 2413 if (deviceId == 0) { 2414 return null; 2415 } 2416 AudioDeviceInfo[] devices = 2417 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS); 2418 for (int i = 0; i < devices.length; i++) { 2419 if (devices[i].getId() == deviceId) { 2420 return devices[i]; 2421 } 2422 } 2423 return null; 2424 } 2425 2426 /* 2427 * Call BEFORE adding a routing callback handler. 2428 */ 2429 private void testEnableNativeRoutingCallbacksLocked() { 2430 if (mRoutingChangeListeners.size() == 0) { 2431 native_enableDeviceCallback(); 2432 } 2433 } 2434 2435 /* 2436 * Call AFTER removing a routing callback handler. 
2437 */ 2438 private void testDisableNativeRoutingCallbacksLocked() { 2439 if (mRoutingChangeListeners.size() == 0) { 2440 native_disableDeviceCallback(); 2441 } 2442 } 2443 2444 //-------------------------------------------------------------------------- 2445 // (Re)Routing Info 2446 //-------------------- 2447 /** 2448 * The list of AudioRouting.OnRoutingChangedListener interfaces added (with 2449 * {@link AudioRecord#addOnRoutingChangedListener} by an app to receive 2450 * (re)routing notifications. 2451 */ 2452 @GuardedBy("mRoutingChangeListeners") 2453 private ArrayMap<AudioRouting.OnRoutingChangedListener, 2454 NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>(); 2455 2456 /** 2457 * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing 2458 * changes on this AudioTrack. 2459 * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive 2460 * notifications of rerouting events. 2461 * @param handler Specifies the {@link Handler} object for the thread on which to execute 2462 * the callback. If <code>null</code>, the {@link Handler} associated with the main 2463 * {@link Looper} will be used. 2464 */ 2465 @Override 2466 public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, 2467 Handler handler) { 2468 synchronized (mRoutingChangeListeners) { 2469 if (listener != null && !mRoutingChangeListeners.containsKey(listener)) { 2470 testEnableNativeRoutingCallbacksLocked(); 2471 mRoutingChangeListeners.put( 2472 listener, new NativeRoutingEventHandlerDelegate(this, listener, 2473 handler != null ? handler : new Handler(mInitializationLooper))); 2474 } 2475 } 2476 } 2477 2478 /** 2479 * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added 2480 * to receive rerouting notifications. 2481 * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface 2482 * to remove. 
2483 */ 2484 @Override 2485 public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) { 2486 synchronized (mRoutingChangeListeners) { 2487 if (mRoutingChangeListeners.containsKey(listener)) { 2488 mRoutingChangeListeners.remove(listener); 2489 } 2490 testDisableNativeRoutingCallbacksLocked(); 2491 } 2492 } 2493 2494 //-------------------------------------------------------------------------- 2495 // (Re)Routing Info 2496 //-------------------- 2497 /** 2498 * Defines the interface by which applications can receive notifications of 2499 * routing changes for the associated {@link AudioTrack}. 2500 * 2501 * @deprecated users should switch to the general purpose 2502 * {@link AudioRouting.OnRoutingChangedListener} class instead. 2503 */ 2504 @Deprecated 2505 public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener { 2506 /** 2507 * Called when the routing of an AudioTrack changes from either and 2508 * explicit or policy rerouting. Use {@link #getRoutedDevice()} to 2509 * retrieve the newly routed-to device. 2510 */ 2511 public void onRoutingChanged(AudioTrack audioTrack); 2512 2513 @Override 2514 default public void onRoutingChanged(AudioRouting router) { 2515 if (router instanceof AudioTrack) { 2516 onRoutingChanged((AudioTrack) router); 2517 } 2518 } 2519 } 2520 2521 /** 2522 * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes 2523 * on this AudioTrack. 2524 * @param listener The {@link OnRoutingChangedListener} interface to receive notifications 2525 * of rerouting events. 2526 * @param handler Specifies the {@link Handler} object for the thread on which to execute 2527 * the callback. If <code>null</code>, the {@link Handler} associated with the main 2528 * {@link Looper} will be used. 2529 * @deprecated users should switch to the general purpose 2530 * {@link AudioRouting.OnRoutingChangedListener} class instead. 
2531 */ 2532 @Deprecated 2533 public void addOnRoutingChangedListener(OnRoutingChangedListener listener, 2534 android.os.Handler handler) { 2535 addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler); 2536 } 2537 2538 /** 2539 * Removes an {@link OnRoutingChangedListener} which has been previously added 2540 * to receive rerouting notifications. 2541 * @param listener The previously added {@link OnRoutingChangedListener} interface to remove. 2542 * @deprecated users should switch to the general purpose 2543 * {@link AudioRouting.OnRoutingChangedListener} class instead. 2544 */ 2545 @Deprecated 2546 public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) { 2547 removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener); 2548 } 2549 2550 /** 2551 * Sends device list change notification to all listeners. 2552 */ 2553 private void broadcastRoutingChange() { 2554 AudioManager.resetAudioPortGeneration(); 2555 synchronized (mRoutingChangeListeners) { 2556 for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) { 2557 Handler handler = delegate.getHandler(); 2558 if (handler != null) { 2559 handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE); 2560 } 2561 } 2562 } 2563 } 2564 2565 //--------------------------------------------------------- 2566 // Interface definitions 2567 //-------------------- 2568 /** 2569 * Interface definition for a callback to be invoked when the playback head position of 2570 * an AudioTrack has reached a notification marker or has increased by a certain period. 2571 */ 2572 public interface OnPlaybackPositionUpdateListener { 2573 /** 2574 * Called on the listener to notify it that the previously set marker has been reached 2575 * by the playback head. 
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         */
        void onPeriodicNotification(AudioTrack track);
    }

    //---------------------------------------------------------
    // Inner classes
    //--------------------
    /**
     * Helper class to handle the forwarding of native position events (marker reached,
     * new position) to the appropriate listener, (potentially) handled in a different thread.
     */
    private class NativePositionEventHandlerDelegate {
        // Handler bound to the chosen looper; null when no looper could be found.
        private final Handler mHandler;

        NativePositionEventHandlerDelegate(final AudioTrack track,
                                           final OnPlaybackPositionUpdateListener listener,
                                           Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        // Defensive: the captured track is final and set at construction,
                        // so this should not trigger in practice.
                        if (track == null) {
                            return;
                        }
                        // Dispatch the native event code to the matching listener callback.
                        switch(msg.what) {
                            case NATIVE_EVENT_MARKER:
                                if (listener != null) {
                                    listener.onMarkerReached(track);
                                }
                                break;
                            case NATIVE_EVENT_NEW_POS:
                                if (listener != null) {
                                    listener.onPeriodicNotification(track);
                                }
                                break;
                            default:
                                loge("Unknown native event type: " + msg.what);
                                break;
                        }
                    }
                };
            } else {
                // No looper available: events for this delegate are silently dropped.
                mHandler = null;
            }
        }

        Handler getHandler() {
            return mHandler;
        }
    }

    /**
     * Helper class to handle the forwarding of native routing events to the appropriate
     * listener, (potentially) handled in a different thread.
     */
    private class NativeRoutingEventHandlerDelegate {
        // Handler bound to the chosen looper; null when no looper could be found.
        private final Handler mHandler;

        NativeRoutingEventHandlerDelegate(final AudioTrack track,
                                          final AudioRouting.OnRoutingChangedListener listener,
                                          Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        // Defensive: the captured track is final and set at construction.
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                            case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE:
                                if (listener != null) {
                                    listener.onRoutingChanged(track);
                                }
                                break;
                            default:
                                loge("Unknown native event type: " + msg.what);
                                break;
                        }
                    }
                };
            } else {
                // No looper available: routing events for this delegate are dropped.
                mHandler = null;
            }
        }

        Handler getHandler() {
            return mHandler;
        }
    }

    //---------------------------------------------------------
    // Java methods called from the native side
    //--------------------
    @SuppressWarnings("unused")
    private static void postEventFromNative(Object audiotrack_ref,
            int what, int arg1, int arg2, Object obj) {
        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
        // The native layer holds only a weak reference; the track may already be collected.
        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
        if (track == null) {
            return;
        }

        // Routing changes fan out to all registered routing listeners directly.
        if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
            track.broadcastRoutingChange();
            return;
        }
        // All other events go through the position-event delegate, if one is installed.
        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
        if (delegate != null) {
            Handler handler = delegate.getHandler();
            if (handler != null) {
                Message m =
                        handler.obtainMessage(what, arg1, arg2, obj);
                handler.sendMessage(m);
            }
        }
    }


    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    // post-condition: mStreamType is overwritten with a value
    // that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    // AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack);

    private native final void native_finalize();

    /**
     * @hide
     */
    public native final void native_release();

    // Transport control.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Data transfer entry points; the boolean selects blocking vs. non-blocking behavior.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    // Buffer sizing queries/controls, in frames.
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback rate and parameters.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Position marker and periodic-notification configuration.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not require a track instance.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effect attachment (see attachAuxEffect()/setAuxEffectSendLevel()).
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Explicit routing and routing-callback control.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();
    static private native int native_get_FCC_8();

//--------------------------------------------------------- 2811 // Utility methods 2812 //------------------ 2813 2814 private static void logd(String msg) { 2815 Log.d(TAG, msg); 2816 } 2817 2818 private static void loge(String msg) { 2819 Log.e(TAG, msg); 2820 } 2821} 2822