// AudioTrack.java revision 33b840444f5a481dd31e129079d3c0cf3acdf80e
1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17package android.media; 18 19import java.lang.annotation.Retention; 20import java.lang.annotation.RetentionPolicy; 21import java.lang.ref.WeakReference; 22import java.lang.Math; 23import java.nio.ByteBuffer; 24import java.nio.ByteOrder; 25import java.nio.NioUtils; 26import java.util.Collection; 27 28import android.annotation.IntDef; 29import android.annotation.NonNull; 30import android.app.ActivityThread; 31import android.app.AppOpsManager; 32import android.content.Context; 33import android.os.Handler; 34import android.os.IBinder; 35import android.os.Looper; 36import android.os.Message; 37import android.os.Process; 38import android.os.RemoteException; 39import android.os.ServiceManager; 40import android.util.ArrayMap; 41import android.util.Log; 42 43import com.android.internal.app.IAppOpsService; 44 45/** 46 * The AudioTrack class manages and plays a single audio resource for Java applications. 47 * It allows streaming of PCM audio buffers to the audio sink for playback. This is 48 * achieved by "pushing" the data to the AudioTrack object using one of the 49 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)}, 50 * and {@link #write(float[], int, int, int)} methods. 
 *
 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
 * one of the {@code write()} methods. These are blocking and return when the data has been
 * transferred from the Java layer to the native layer and queued for playback. The streaming
 * mode is most useful when playing blocks of audio data that for instance are:
 *
 * <ul>
 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
 *   <li>too big to fit in memory because of the characteristics of the audio data
 *         (high sampling rate, bits per sample ...)</li>
 *   <li>received or generated while previously queued audio is playing.</li>
 * </ul>
 *
 * The static mode should be chosen when dealing with short sounds that fit in memory and
 * that need to be played with the smallest latency possible. The static mode will
 * therefore be preferred for UI and game sounds that are played often, and with the
 * smallest overhead possible.
 *
 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
 * The size of this buffer, specified during the construction, determines how long an AudioTrack
 * can play before running out of data.<br>
 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
 * be played from it.<br>
 * For the streaming mode, data will be written to the audio sink in chunks of
 * sizes less than or equal to the total buffer size.
 *
 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
 */
public class AudioTrack implements AudioRouting
{
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Maximum value for AudioTrack channel count
     * @hide public for MediaCode only, do not un-hide or change to a numeric literal
     */
    public static final int CHANNEL_COUNT_MAX = native_get_FCC_8();

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public static final int SUCCESS = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public static final int ERROR = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public static final int ERROR_BAD_VALUE = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public static final int ERROR_INVALID_OPERATION = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     * @hide
     */
    public static final int ERROR_DEAD_OBJECT = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public static final int ERROR_WOULD_BLOCK = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current native audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     * {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     * {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    private final AudioAttributes mAttributes;
    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat; // initialized by all constructors via audioParamCheck()
    /**
     * Audio session ID
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * Reference to the app-ops service.
     */
    private final IAppOpsService mAppOps;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;


    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   natural latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is automatically increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the native buffer created, which
     *   determines the frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     * @param mode streaming or static buffer.
     *   See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // delegate to the session-aware constructor, letting the system generate a session ID
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
    }

    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session with all other media players or audio tracks in the same session, otherwise a new
     * session will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Wrap the legacy (streamType/sampleRate/channelConfig/format) parameters into
        // AudioAttributes and AudioFormat, then delegate to the attributes-based constructor.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
    }

    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *   that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *   configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes");
        }
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0; // the native layer uses 0 to mean "unspecified"
        }

        // Pull channel index mask, channel (position) mask and encoding out of the
        // AudioFormat, but only for the properties the caller actually set.
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates and stores mSampleRate/mChannel*/mAudioFormat/mDataLoadMode,
        // throwing IllegalArgumentException on invalid input.
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;
        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        mAttributes = new AudioAttributes.Builder(attributes).build();

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // single-element arrays act as in/out parameters for native_setup
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        // adopt the values the native layer actually chose
        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        if (mDataLoadMode == MODE_STATIC) {
            // a static track is not fully usable until its data has been written
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }

    /**
     * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
     * the AudioTrackRoutingProxy subclass.
     * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
     *   (associated with an OpenSL ES player).
     */
    /*package*/ AudioTrack(long nativeTrackInJavaObj) {
        // "final"s — no attributes/app-ops for a track wrapping an existing native object
        mAttributes = null;
        mAppOps = null;

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }
        mInitializationLooper = looper;

        // other initialization...
        // Note that for this native_setup, we are providing an already created/initialized
        // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
        int[] session = { 0 };
        int[] rates = { 0 };
        int initResult = native_setup(new WeakReference<AudioTrack>(this),
                null /*mAttributes - NA*/,
                rates /*sampleRate - NA*/,
                0 /*mChannelMask - NA*/,
                0 /*mChannelIndexMask - NA*/,
                0 /*mAudioFormat - NA*/,
                0 /*mNativeBufferSizeInBytes - NA*/,
                0 /*mDataLoadMode - NA*/,
                session,
                nativeTrackInJavaObj);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSessionId = session[0];

        mState = STATE_INITIALIZED;
    }

    /**
     * Builder class for {@link AudioTrack} objects.
     * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
     * attributes and audio format parameters, you indicate which of those vary from the default
     * behavior on the device.
     * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
     * parameters, to be used by a new <code>AudioTrack</code> instance:
     *
     * <pre class="prettyprint">
     * AudioTrack player = new AudioTrack.Builder()
     *         .setAudioAttributes(new AudioAttributes.Builder()
     *                  .setUsage(AudioAttributes.USAGE_ALARM)
     *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
     *                  .build())
     *         .setAudioFormat(new AudioFormat.Builder()
     *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     *                 .setSampleRate(44100)
     *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
     *                 .build())
     *         .setBufferSizeInBytes(minBuffSize)
     *         .build();
     * </pre>
     * <p>
     * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
     * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
     * <br>If the audio format is not specified or is incomplete, its sample rate will be the
     * default output sample rate of the device (see
     * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be
     * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
     * {@link AudioFormat#ENCODING_PCM_16BIT}.
     * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
     * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
     * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
     * <code>MODE_STREAM</code> will be used.
     * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
     * be generated.
     */
    public static class Builder {
        private AudioAttributes mAttributes;
        private AudioFormat mFormat;
        private int mBufferSizeInBytes;
        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM;

        /**
         * Constructs a new Builder with the default values as described above.
         */
        public Builder() {
        }

        /**
         * Sets the {@link AudioAttributes}.
         * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
         *     data to be played.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
                throws IllegalArgumentException {
            if (attributes == null) {
                throw new IllegalArgumentException("Illegal null AudioAttributes argument");
            }
            // keep reference, we only copy the data when building
            mAttributes = attributes;
            return this;
        }

        /**
         * Sets the format of the audio data to be played by the {@link AudioTrack}.
         * See {@link AudioFormat.Builder} for configuring the audio format parameters such
         * as encoding, channel mask and sample rate.
         * @param format a non-null {@link AudioFormat} instance.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
                throws IllegalArgumentException {
            if (format == null) {
                throw new IllegalArgumentException("Illegal null AudioFormat argument");
            }
            // keep reference, we only copy the data when building
            mFormat = format;
            return this;
        }

        /**
         * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
         * If using the {@link AudioTrack} in streaming mode
         * (see {@link AudioTrack#MODE_STREAM}), you can write data into this buffer in smaller
         * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
         * the minimum required buffer size for the successful creation of an AudioTrack instance
         * in streaming mode. Using values smaller than <code>getMinBufferSize()</code> will result
         * in an exception when trying to build the <code>AudioTrack</code>.
         * <br>If using the <code>AudioTrack</code> in static mode (see
         * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
         * played by this instance.
         * @param bufferSizeInBytes the buffer size in bytes; must be strictly positive.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
                throws IllegalArgumentException {
            if (bufferSizeInBytes <= 0) {
                throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
            }
            mBufferSizeInBytes = bufferSizeInBytes;
            return this;
        }

        /**
         * Sets the mode under which buffers of audio data are transferred from the
         * {@link AudioTrack} to the framework.
         * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setTransferMode(@TransferMode int mode)
                throws IllegalArgumentException {
            switch(mode) {
                case MODE_STREAM:
                case MODE_STATIC:
                    mMode = mode;
                    break;
                default:
                    throw new IllegalArgumentException("Invalid transfer mode " + mode);
            }
            return this;
        }

        /**
         * Sets the session ID the {@link AudioTrack} will be attached to.
         * @param sessionId a strictly positive ID number retrieved from another
         *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
         *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
         *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setSessionId(int sessionId)
                throws IllegalArgumentException {
            if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
                throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
            }
            mSessionId = sessionId;
            return this;
        }

        /**
         * Builds an {@link AudioTrack} instance initialized with all the parameters set
         * on this <code>Builder</code>.
         * @return a new successfully initialized {@link AudioTrack} instance.
         * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
         *     were incompatible, or if they are not supported by the device,
         *     or if the device was not available.
721 */ 722 public @NonNull AudioTrack build() throws UnsupportedOperationException { 723 if (mAttributes == null) { 724 mAttributes = new AudioAttributes.Builder() 725 .setUsage(AudioAttributes.USAGE_MEDIA) 726 .build(); 727 } 728 if (mFormat == null) { 729 mFormat = new AudioFormat.Builder() 730 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 731 //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED) 732 .setEncoding(AudioFormat.ENCODING_DEFAULT) 733 .build(); 734 } 735 try { 736 // If the buffer size is not specified in streaming mode, 737 // use a single frame for the buffer size and let the 738 // native code figure out the minimum buffer size. 739 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) { 740 mBufferSizeInBytes = mFormat.getChannelCount() 741 * mFormat.getBytesPerSample(mFormat.getEncoding()); 742 } 743 final AudioTrack track = new AudioTrack( 744 mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId); 745 if (track.getState() == STATE_UNINITIALIZED) { 746 // release is not necessary 747 throw new UnsupportedOperationException("Cannot create AudioTrack"); 748 } 749 return track; 750 } catch (IllegalArgumentException e) { 751 throw new UnsupportedOperationException(e.getMessage()); 752 } 753 } 754 } 755 756 // mask of all the positional channels supported, however the allowed combinations 757 // are further restricted by the matching left/right rule and CHANNEL_COUNT_MAX 758 private static final int SUPPORTED_OUT_CHANNELS = 759 AudioFormat.CHANNEL_OUT_FRONT_LEFT | 760 AudioFormat.CHANNEL_OUT_FRONT_RIGHT | 761 AudioFormat.CHANNEL_OUT_FRONT_CENTER | 762 AudioFormat.CHANNEL_OUT_LOW_FREQUENCY | 763 AudioFormat.CHANNEL_OUT_BACK_LEFT | 764 AudioFormat.CHANNEL_OUT_BACK_RIGHT | 765 AudioFormat.CHANNEL_OUT_BACK_CENTER | 766 AudioFormat.CHANNEL_OUT_SIDE_LEFT | 767 AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 768 769 // Convenience method for the constructor's parameter checks. 
770 // This is where constructor IllegalArgumentException-s are thrown 771 // postconditions: 772 // mChannelCount is valid 773 // mChannelMask is valid 774 // mAudioFormat is valid 775 // mSampleRate is valid 776 // mDataLoadMode is valid 777 private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask, 778 int audioFormat, int mode) { 779 //-------------- 780 // sample rate, note these values are subject to change 781 if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN || 782 sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) && 783 sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) { 784 throw new IllegalArgumentException(sampleRateInHz 785 + "Hz is not a supported sample rate."); 786 } 787 mSampleRate = sampleRateInHz; 788 789 //-------------- 790 // channel config 791 mChannelConfiguration = channelConfig; 792 793 switch (channelConfig) { 794 case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT 795 case AudioFormat.CHANNEL_OUT_MONO: 796 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 797 mChannelCount = 1; 798 mChannelMask = AudioFormat.CHANNEL_OUT_MONO; 799 break; 800 case AudioFormat.CHANNEL_OUT_STEREO: 801 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 802 mChannelCount = 2; 803 mChannelMask = AudioFormat.CHANNEL_OUT_STEREO; 804 break; 805 default: 806 if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) { 807 mChannelCount = 0; 808 break; // channel index configuration only 809 } 810 if (!isMultichannelConfigSupported(channelConfig)) { 811 // input channel configuration features unsupported channels 812 throw new IllegalArgumentException("Unsupported channel configuration."); 813 } 814 mChannelMask = channelConfig; 815 mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 816 } 817 // check the channel index configuration (if present) 818 mChannelIndexMask = channelIndexMask; 819 if (mChannelIndexMask != 0) { 820 // restrictive: indexMask could allow up to 
AUDIO_CHANNEL_BITS_LOG2 821 final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1; 822 if ((channelIndexMask & ~indexMask) != 0) { 823 throw new IllegalArgumentException("Unsupported channel index configuration " 824 + channelIndexMask); 825 } 826 int channelIndexCount = Integer.bitCount(channelIndexMask); 827 if (mChannelCount == 0) { 828 mChannelCount = channelIndexCount; 829 } else if (mChannelCount != channelIndexCount) { 830 throw new IllegalArgumentException("Channel count must match"); 831 } 832 } 833 834 //-------------- 835 // audio format 836 if (audioFormat == AudioFormat.ENCODING_DEFAULT) { 837 audioFormat = AudioFormat.ENCODING_PCM_16BIT; 838 } 839 840 if (!AudioFormat.isPublicEncoding(audioFormat)) { 841 throw new IllegalArgumentException("Unsupported audio encoding."); 842 } 843 mAudioFormat = audioFormat; 844 845 //-------------- 846 // audio load mode 847 if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) || 848 ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) { 849 throw new IllegalArgumentException("Invalid mode."); 850 } 851 mDataLoadMode = mode; 852 } 853 854 /** 855 * Convenience method to check that the channel configuration (a.k.a channel mask) is supported 856 * @param channelConfig the mask to validate 857 * @return false if the AudioTrack can't be used with such a mask 858 */ 859 private static boolean isMultichannelConfigSupported(int channelConfig) { 860 // check for unsupported channels 861 if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) { 862 loge("Channel configuration features unsupported channels"); 863 return false; 864 } 865 final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 866 if (channelCount > CHANNEL_COUNT_MAX) { 867 loge("Channel configuration contains too many channels " + 868 channelCount + ">" + CHANNEL_COUNT_MAX); 869 return false; 870 } 871 // check for unsupported multichannel combinations: 872 // - FL/FR must be present 873 // - L/R channels 
must be paired (e.g. no single L channel) 874 final int frontPair = 875 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT; 876 if ((channelConfig & frontPair) != frontPair) { 877 loge("Front channels must be present in multichannel configurations"); 878 return false; 879 } 880 final int backPair = 881 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT; 882 if ((channelConfig & backPair) != 0) { 883 if ((channelConfig & backPair) != backPair) { 884 loge("Rear channels can't be used independently"); 885 return false; 886 } 887 } 888 final int sidePair = 889 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 890 if ((channelConfig & sidePair) != 0 891 && (channelConfig & sidePair) != sidePair) { 892 loge("Side channels can't be used independently"); 893 return false; 894 } 895 return true; 896 } 897 898 899 // Convenience method for the constructor's audio buffer size check. 900 // preconditions: 901 // mChannelCount is valid 902 // mAudioFormat is valid 903 // postcondition: 904 // mNativeBufferSizeInBytes is valid (multiple of frame size, positive) 905 private void audioBuffSizeCheck(int audioBufferSize) { 906 // NB: this section is only valid with PCM or IEC61937 data. 907 // To update when supporting compressed formats 908 int frameSizeInBytes; 909 if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) { 910 frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat); 911 } else { 912 frameSizeInBytes = 1; 913 } 914 if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) { 915 throw new IllegalArgumentException("Invalid audio buffer size."); 916 } 917 918 mNativeBufferSizeInBytes = audioBufferSize; 919 mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes; 920 } 921 922 923 /** 924 * Releases the native AudioTrack resources. 
925 */ 926 public void release() { 927 // even though native_release() stops the native AudioTrack, we need to stop 928 // AudioTrack subclasses too. 929 try { 930 stop(); 931 } catch(IllegalStateException ise) { 932 // don't raise an exception, we're releasing the resources. 933 } 934 native_release(); 935 mState = STATE_UNINITIALIZED; 936 } 937 938 @Override 939 protected void finalize() { 940 native_finalize(); 941 } 942 943 //-------------------------------------------------------------------------- 944 // Getters 945 //-------------------- 946 /** 947 * Returns the minimum gain value, which is the constant 0.0. 948 * Gain values less than 0.0 will be clamped to 0.0. 949 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 950 * @return the minimum value, which is the constant 0.0. 951 */ 952 static public float getMinVolume() { 953 return GAIN_MIN; 954 } 955 956 /** 957 * Returns the maximum gain value, which is greater than or equal to 1.0. 958 * Gain values greater than the maximum will be clamped to the maximum. 959 * <p>The word "volume" in the API name is historical; this is actually a gain. 960 * expressed as a linear multiplier on sample values, where a maximum value of 1.0 961 * corresponds to a gain of 0 dB (sample values left unmodified). 962 * @return the maximum value, which is greater than or equal to 1.0. 963 */ 964 static public float getMaxVolume() { 965 return GAIN_MAX; 966 } 967 968 /** 969 * Returns the configured audio source sample rate in Hz. 970 * The initial source sample rate depends on the constructor parameters, 971 * but the source sample rate may change if {@link #setPlaybackRate(int)} is called. 972 * If the constructor had a specific sample rate, then the initial sink sample rate is that 973 * value. 974 * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}, 975 * then the initial sink sample rate is a route-dependent default value based on the source [sic]. 
976 */ 977 public int getSampleRate() { 978 return mSampleRate; 979 } 980 981 /** 982 * Returns the current playback sample rate rate in Hz. 983 */ 984 public int getPlaybackRate() { 985 return native_get_playback_rate(); 986 } 987 988 /** 989 * Returns the current playback parameters. 990 * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters 991 * @return current {@link PlaybackParams}. 992 * @throws IllegalStateException if track is not initialized. 993 */ 994 public @NonNull PlaybackParams getPlaybackParams() { 995 return native_get_playback_params(); 996 } 997 998 /** 999 * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT}, 1000 * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}. 1001 */ 1002 public int getAudioFormat() { 1003 return mAudioFormat; 1004 } 1005 1006 /** 1007 * Returns the type of audio stream this AudioTrack is configured for. 1008 * Compare the result against {@link AudioManager#STREAM_VOICE_CALL}, 1009 * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING}, 1010 * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM}, 1011 * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}. 1012 */ 1013 public int getStreamType() { 1014 return mStreamType; 1015 } 1016 1017 /** 1018 * Returns the configured channel position mask. 1019 * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO}, 1020 * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}. 1021 * This method may return {@link AudioFormat#CHANNEL_INVALID} if 1022 * a channel index mask was used. Consider 1023 * {@link #getFormat()} instead, to obtain an {@link AudioFormat}, 1024 * which contains both the channel position mask and the channel index mask. 
1025 */ 1026 public int getChannelConfiguration() { 1027 return mChannelConfiguration; 1028 } 1029 1030 /** 1031 * Returns the configured <code>AudioTrack</code> format. 1032 * @return an {@link AudioFormat} containing the 1033 * <code>AudioTrack</code> parameters at the time of configuration. 1034 */ 1035 public @NonNull AudioFormat getFormat() { 1036 AudioFormat.Builder builder = new AudioFormat.Builder() 1037 .setSampleRate(mSampleRate) 1038 .setEncoding(mAudioFormat); 1039 if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) { 1040 builder.setChannelMask(mChannelConfiguration); 1041 } 1042 if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) { 1043 builder.setChannelIndexMask(mChannelIndexMask); 1044 } 1045 return builder.build(); 1046 } 1047 1048 /** 1049 * Returns the configured number of channels. 1050 */ 1051 public int getChannelCount() { 1052 return mChannelCount; 1053 } 1054 1055 /** 1056 * Returns the state of the AudioTrack instance. This is useful after the 1057 * AudioTrack instance has been created to check if it was initialized 1058 * properly. This ensures that the appropriate resources have been acquired. 1059 * @see #STATE_UNINITIALIZED 1060 * @see #STATE_INITIALIZED 1061 * @see #STATE_NO_STATIC_DATA 1062 */ 1063 public int getState() { 1064 return mState; 1065 } 1066 1067 /** 1068 * Returns the playback state of the AudioTrack instance. 1069 * @see #PLAYSTATE_STOPPED 1070 * @see #PLAYSTATE_PAUSED 1071 * @see #PLAYSTATE_PLAYING 1072 */ 1073 public int getPlayState() { 1074 synchronized (mPlayStateLock) { 1075 return mPlayState; 1076 } 1077 } 1078 1079 1080 /** 1081 * Returns the effective size of the <code>AudioTrack</code> buffer 1082 * that the application writes to. 1083 * <p> This will be less than or equal to the result of 1084 * {@link #getBufferCapacityInFrames()}. 1085 * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called. 
1086 * <p> If the track is subsequently routed to a different output sink, the buffer 1087 * size and capacity may enlarge to accommodate. 1088 * <p> If the <code>AudioTrack</code> encoding indicates compressed data, 1089 * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is 1090 * the size of the native <code>AudioTrack</code> buffer in bytes. 1091 * <p> See also {@link AudioManager#getProperty(String)} for key 1092 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}. 1093 * @return current size in frames of the <code>AudioTrack</code> buffer. 1094 * @throws IllegalStateException 1095 */ 1096 public int getBufferSizeInFrames() { 1097 return native_get_buffer_size_frames(); 1098 } 1099 1100 /** 1101 * Limits the effective size of the <code>AudioTrack</code> buffer 1102 * that the application writes to. 1103 * <p> A write to this AudioTrack will not fill the buffer beyond this limit. 1104 * If a blocking write is used then the write will block until the the data 1105 * can fit within this limit. 1106 * <p>Changing this limit modifies the latency associated with 1107 * the buffer for this track. A smaller size will give lower latency 1108 * but there may be more glitches due to buffer underruns. 1109 * <p>The actual size used may not be equal to this requested size. 1110 * It will be limited to a valid range with a maximum of 1111 * {@link #getBufferCapacityInFrames()}. 1112 * It may also be adjusted slightly for internal reasons. 1113 * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE} 1114 * will be returned. 1115 * <p>This method is only supported for PCM audio. 1116 * It is not supported for compressed audio tracks. 
1117 * 1118 * @param bufferSizeInFrames requested buffer size 1119 * @return the actual buffer size in frames or an error code, 1120 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION} 1121 * @throws IllegalStateException 1122 */ 1123 public int setBufferSizeInFrames(int bufferSizeInFrames) { 1124 if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) { 1125 return ERROR_INVALID_OPERATION; 1126 } 1127 if (bufferSizeInFrames < 0) { 1128 return ERROR_BAD_VALUE; 1129 } 1130 return native_set_buffer_size_frames(bufferSizeInFrames); 1131 } 1132 1133 /** 1134 * Returns the maximum size of the native <code>AudioTrack</code> buffer. 1135 * <p> If the track's creation mode is {@link #MODE_STATIC}, 1136 * it is equal to the specified bufferSizeInBytes on construction, converted to frame units. 1137 * A static track's native frame count will not change. 1138 * <p> If the track's creation mode is {@link #MODE_STREAM}, 1139 * it is greater than or equal to the specified bufferSizeInBytes converted to frame units. 1140 * For streaming tracks, this value may be rounded up to a larger value if needed by 1141 * the target output sink, and 1142 * if the track is subsequently routed to a different output sink, the native 1143 * frame count may enlarge to accommodate. 1144 * <p> If the <code>AudioTrack</code> encoding indicates compressed data, 1145 * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is 1146 * the size of the native <code>AudioTrack</code> buffer in bytes. 1147 * <p> See also {@link AudioManager#getProperty(String)} for key 1148 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}. 1149 * @return maximum size in frames of the <code>AudioTrack</code> buffer. 1150 * @throws IllegalStateException 1151 */ 1152 public int getBufferCapacityInFrames() { 1153 return native_get_buffer_capacity_frames(); 1154 } 1155 1156 /** 1157 * Returns the frame count of the native <code>AudioTrack</code> buffer. 
1158 * @return current size in frames of the <code>AudioTrack</code> buffer. 1159 * @throws IllegalStateException 1160 * @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead. 1161 */ 1162 @Deprecated 1163 protected int getNativeFrameCount() { 1164 return native_get_buffer_capacity_frames(); 1165 } 1166 1167 /** 1168 * Returns marker position expressed in frames. 1169 * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition}, 1170 * or zero if marker is disabled. 1171 */ 1172 public int getNotificationMarkerPosition() { 1173 return native_get_marker_pos(); 1174 } 1175 1176 /** 1177 * Returns the notification update period expressed in frames. 1178 * Zero means that no position update notifications are being delivered. 1179 */ 1180 public int getPositionNotificationPeriod() { 1181 return native_get_pos_update_period(); 1182 } 1183 1184 /** 1185 * Returns the playback head position expressed in frames. 1186 * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is 1187 * unsigned 32-bits. That is, the next position after 0x7FFFFFFF is (int) 0x80000000. 1188 * This is a continuously advancing counter. It will wrap (overflow) periodically, 1189 * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz. 1190 * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}. 1191 * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates 1192 * the total number of frames played since reset, 1193 * <i>not</i> the current offset within the buffer. 1194 */ 1195 public int getPlaybackHeadPosition() { 1196 return native_get_position(); 1197 } 1198 1199 /** 1200 * Returns this track's estimated latency in milliseconds. This includes the latency due 1201 * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver. 1202 * 1203 * DO NOT UNHIDE. 
The existing approach for doing A/V sync has too many problems. We need 1204 * a better solution. 1205 * @hide 1206 */ 1207 public int getLatency() { 1208 return native_get_latency(); 1209 } 1210 1211 /** 1212 * Returns the number of underrun occurrences in the application-level write buffer 1213 * since the AudioTrack was created. 1214 * An underrun occurs if the application does not write audio 1215 * data quickly enough, causing the buffer to underflow 1216 * and a potential audio glitch or pop. 1217 * <p> 1218 * Underruns are less likely when buffer sizes are large. 1219 * It may be possible to eliminate underruns by recreating the AudioTrack with 1220 * a larger buffer. 1221 * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the 1222 * effective size of the buffer. 1223 */ 1224 public int getUnderrunCount() { 1225 return native_get_underrun_count(); 1226 } 1227 1228 /** 1229 * Returns the output sample rate in Hz for the specified stream type. 1230 */ 1231 static public int getNativeOutputSampleRate(int streamType) { 1232 return native_get_output_sample_rate(streamType); 1233 } 1234 1235 /** 1236 * Returns the minimum buffer size required for the successful creation of an AudioTrack 1237 * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't 1238 * guarantee a smooth playback under load, and higher values should be chosen according to 1239 * the expected frequency at which the buffer will be refilled with additional data to play. 1240 * For example, if you intend to dynamically set the source sample rate of an AudioTrack 1241 * to a higher value than the initial source sample rate, be sure to configure the buffer size 1242 * based on the highest planned sample rate. 1243 * @param sampleRateInHz the source sample rate expressed in Hz. 1244 * {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted. 1245 * @param channelConfig describes the configuration of the audio channels. 
1246 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 1247 * {@link AudioFormat#CHANNEL_OUT_STEREO} 1248 * @param audioFormat the format in which the audio data is represented. 1249 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 1250 * {@link AudioFormat#ENCODING_PCM_8BIT}, 1251 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 1252 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed, 1253 * or {@link #ERROR} if unable to query for output properties, 1254 * or the minimum buffer size expressed in bytes. 1255 */ 1256 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) { 1257 int channelCount = 0; 1258 switch(channelConfig) { 1259 case AudioFormat.CHANNEL_OUT_MONO: 1260 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1261 channelCount = 1; 1262 break; 1263 case AudioFormat.CHANNEL_OUT_STEREO: 1264 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1265 channelCount = 2; 1266 break; 1267 default: 1268 if (!isMultichannelConfigSupported(channelConfig)) { 1269 loge("getMinBufferSize(): Invalid channel configuration."); 1270 return ERROR_BAD_VALUE; 1271 } else { 1272 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1273 } 1274 } 1275 1276 if (!AudioFormat.isPublicEncoding(audioFormat)) { 1277 loge("getMinBufferSize(): Invalid audio format."); 1278 return ERROR_BAD_VALUE; 1279 } 1280 1281 // sample rate, note these values are subject to change 1282 // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed 1283 if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) || 1284 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) { 1285 loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate."); 1286 return ERROR_BAD_VALUE; 1287 } 1288 1289 int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat); 1290 if (size <= 0) { 1291 loge("getMinBufferSize(): error querying hardware"); 1292 return ERROR; 1293 } 1294 else { 1295 return size; 1296 } 1297 } 1298 1299 /** 
1300 * Returns the audio session ID. 1301 * 1302 * @return the ID of the audio session this AudioTrack belongs to. 1303 */ 1304 public int getAudioSessionId() { 1305 return mSessionId; 1306 } 1307 1308 /** 1309 * Poll for a timestamp on demand. 1310 * <p> 1311 * If you need to track timestamps during initial warmup or after a routing or mode change, 1312 * you should request a new timestamp periodically until the reported timestamps 1313 * show that the frame position is advancing, or until it becomes clear that 1314 * timestamps are unavailable for this route. 1315 * <p> 1316 * After the clock is advancing at a stable rate, 1317 * query for a new timestamp approximately once every 10 seconds to once per minute. 1318 * Calling this method more often is inefficient. 1319 * It is also counter-productive to call this method more often than recommended, 1320 * because the short-term differences between successive timestamp reports are not meaningful. 1321 * If you need a high-resolution mapping between frame position and presentation time, 1322 * consider implementing that at application level, based on low-resolution timestamps. 1323 * <p> 1324 * The audio data at the returned position may either already have been 1325 * presented, or may have not yet been presented but is committed to be presented. 1326 * It is not possible to request the time corresponding to a particular position, 1327 * or to request the (fractional) position corresponding to a particular time. 1328 * If you need such features, consider implementing them at application level. 1329 * 1330 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1331 * and owned by caller. 1332 * @return true if a timestamp is available, or false if no timestamp is available. 
1333 * If a timestamp if available, 1334 * the AudioTimestamp instance is filled in with a position in frame units, together 1335 * with the estimated time when that frame was presented or is committed to 1336 * be presented. 1337 * In the case that no timestamp is available, any supplied instance is left unaltered. 1338 * A timestamp may be temporarily unavailable while the audio clock is stabilizing, 1339 * or during and immediately after a route change. 1340 * A timestamp is permanently unavailable for a given route if the route does not support 1341 * timestamps. In this case, the approximate frame position can be obtained 1342 * using {@link #getPlaybackHeadPosition}. 1343 * However, it may be useful to continue to query for 1344 * timestamps occasionally, to recover after a route change. 1345 */ 1346 // Add this text when the "on new timestamp" API is added: 1347 // Use if you need to get the most recent timestamp outside of the event callback handler. 1348 public boolean getTimestamp(AudioTimestamp timestamp) 1349 { 1350 if (timestamp == null) { 1351 throw new IllegalArgumentException(); 1352 } 1353 // It's unfortunate, but we have to either create garbage every time or use synchronized 1354 long[] longArray = new long[2]; 1355 int ret = native_get_timestamp(longArray); 1356 if (ret != SUCCESS) { 1357 return false; 1358 } 1359 timestamp.framePosition = longArray[0]; 1360 timestamp.nanoTime = longArray[1]; 1361 return true; 1362 } 1363 1364 /** 1365 * Poll for a timestamp on demand. 1366 * <p> 1367 * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code. 1368 * 1369 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1370 * and owned by caller. 
1371 * @return {@link #SUCCESS} if a timestamp is available 1372 * {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called 1373 * immediately after start/ACTIVE, when the number of frames consumed is less than the 1374 * overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll 1375 * again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time 1376 * for the timestamp. 1377 * {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1378 * needs to be recreated. 1379 * {@link #ERROR_INVALID_OPERATION} if current route does not support 1380 * timestamps. In this case, the approximate frame position can be obtained 1381 * using {@link #getPlaybackHeadPosition}. 1382 * 1383 * The AudioTimestamp instance is filled in with a position in frame units, together 1384 * with the estimated time when that frame was presented or is committed to 1385 * be presented. 1386 * @hide 1387 */ 1388 // Add this text when the "on new timestamp" API is added: 1389 // Use if you need to get the most recent timestamp outside of the event callback handler. 1390 public int getTimestampWithStatus(AudioTimestamp timestamp) 1391 { 1392 if (timestamp == null) { 1393 throw new IllegalArgumentException(); 1394 } 1395 // It's unfortunate, but we have to either create garbage every time or use synchronized 1396 long[] longArray = new long[2]; 1397 int ret = native_get_timestamp(longArray); 1398 timestamp.framePosition = longArray[0]; 1399 timestamp.nanoTime = longArray[1]; 1400 return ret; 1401 } 1402 1403 //-------------------------------------------------------------------------- 1404 // Initialization / configuration 1405 //-------------------- 1406 /** 1407 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1408 * for each periodic playback head position update. 1409 * Notifications will be received in the same thread as the one in which the AudioTrack 1410 * instance was created. 
1411 * @param listener 1412 */ 1413 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) { 1414 setPlaybackPositionUpdateListener(listener, null); 1415 } 1416 1417 /** 1418 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1419 * for each periodic playback head position update. 1420 * Use this method to receive AudioTrack events in the Handler associated with another 1421 * thread than the one in which you created the AudioTrack instance. 1422 * @param listener 1423 * @param handler the Handler that will receive the event notification messages. 1424 */ 1425 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, 1426 Handler handler) { 1427 if (listener != null) { 1428 mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler); 1429 } else { 1430 mEventHandlerDelegate = null; 1431 } 1432 } 1433 1434 1435 private static float clampGainOrLevel(float gainOrLevel) { 1436 if (Float.isNaN(gainOrLevel)) { 1437 throw new IllegalArgumentException(); 1438 } 1439 if (gainOrLevel < GAIN_MIN) { 1440 gainOrLevel = GAIN_MIN; 1441 } else if (gainOrLevel > GAIN_MAX) { 1442 gainOrLevel = GAIN_MAX; 1443 } 1444 return gainOrLevel; 1445 } 1446 1447 1448 /** 1449 * Sets the specified left and right output gain values on the AudioTrack. 1450 * <p>Gain values are clamped to the closed interval [0.0, max] where 1451 * max is the value of {@link #getMaxVolume}. 1452 * A value of 0.0 results in zero gain (silence), and 1453 * a value of 1.0 means unity gain (signal unchanged). 1454 * The default value is 1.0 meaning unity gain. 1455 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1456 * @param leftGain output gain for the left channel. 
1457 * @param rightGain output gain for the right channel 1458 * @return error code or success, see {@link #SUCCESS}, 1459 * {@link #ERROR_INVALID_OPERATION} 1460 * @deprecated Applications should use {@link #setVolume} instead, as it 1461 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 1462 */ 1463 public int setStereoVolume(float leftGain, float rightGain) { 1464 if (isRestricted()) { 1465 return SUCCESS; 1466 } 1467 if (mState == STATE_UNINITIALIZED) { 1468 return ERROR_INVALID_OPERATION; 1469 } 1470 1471 leftGain = clampGainOrLevel(leftGain); 1472 rightGain = clampGainOrLevel(rightGain); 1473 1474 native_setVolume(leftGain, rightGain); 1475 1476 return SUCCESS; 1477 } 1478 1479 1480 /** 1481 * Sets the specified output gain value on all channels of this track. 1482 * <p>Gain values are clamped to the closed interval [0.0, max] where 1483 * max is the value of {@link #getMaxVolume}. 1484 * A value of 0.0 results in zero gain (silence), and 1485 * a value of 1.0 means unity gain (signal unchanged). 1486 * The default value is 1.0 meaning unity gain. 1487 * <p>This API is preferred over {@link #setStereoVolume}, as it 1488 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 1489 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1490 * @param gain output gain for all channels. 1491 * @return error code or success, see {@link #SUCCESS}, 1492 * {@link #ERROR_INVALID_OPERATION} 1493 */ 1494 public int setVolume(float gain) { 1495 return setStereoVolume(gain, gain); 1496 } 1497 1498 1499 /** 1500 * Sets the playback sample rate for this track. This sets the sampling rate at which 1501 * the audio data will be consumed and played back 1502 * (as set by the sampleRateInHz parameter in the 1503 * {@link #AudioTrack(int, int, int, int, int, int)} constructor), 1504 * not the original sampling rate of the 1505 * content. 
For example, setting it to half the sample rate of the content will cause the 1506 * playback to last twice as long, but will also result in a pitch shift down by one octave. 1507 * The valid sample rate range is from 1 Hz to twice the value returned by 1508 * {@link #getNativeOutputSampleRate(int)}. 1509 * Use {@link #setPlaybackParams(PlaybackParams)} for speed control. 1510 * <p> This method may also be used to repurpose an existing <code>AudioTrack</code> 1511 * for playback of content of differing sample rate, 1512 * but with identical encoding and channel mask. 1513 * @param sampleRateInHz the sample rate expressed in Hz 1514 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1515 * {@link #ERROR_INVALID_OPERATION} 1516 */ 1517 public int setPlaybackRate(int sampleRateInHz) { 1518 if (mState != STATE_INITIALIZED) { 1519 return ERROR_INVALID_OPERATION; 1520 } 1521 if (sampleRateInHz <= 0) { 1522 return ERROR_BAD_VALUE; 1523 } 1524 return native_set_playback_rate(sampleRateInHz); 1525 } 1526 1527 1528 /** 1529 * Sets the playback parameters. 1530 * This method returns failure if it cannot apply the playback parameters. 1531 * One possible cause is that the parameters for speed or pitch are out of range. 1532 * Another possible cause is that the <code>AudioTrack</code> is streaming 1533 * (see {@link #MODE_STREAM}) and the 1534 * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer 1535 * on configuration must be larger than the speed multiplied by the minimum size 1536 * {@link #getMinBufferSize(int, int, int)}) to allow proper playback. 1537 * @param params see {@link PlaybackParams}. In particular, 1538 * speed, pitch, and audio mode should be set. 1539 * @throws IllegalArgumentException if the parameters are invalid or not accepted. 1540 * @throws IllegalStateException if track is not initialized. 
1541 */ 1542 public void setPlaybackParams(@NonNull PlaybackParams params) { 1543 if (params == null) { 1544 throw new IllegalArgumentException("params is null"); 1545 } 1546 native_set_playback_params(params); 1547 } 1548 1549 1550 /** 1551 * Sets the position of the notification marker. At most one marker can be active. 1552 * @param markerInFrames marker position in wrapping frame units similar to 1553 * {@link #getPlaybackHeadPosition}, or zero to disable the marker. 1554 * To set a marker at a position which would appear as zero due to wraparound, 1555 * a workaround is to use a non-zero position near zero, such as -1 or 1. 1556 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1557 * {@link #ERROR_INVALID_OPERATION} 1558 */ 1559 public int setNotificationMarkerPosition(int markerInFrames) { 1560 if (mState == STATE_UNINITIALIZED) { 1561 return ERROR_INVALID_OPERATION; 1562 } 1563 return native_set_marker_pos(markerInFrames); 1564 } 1565 1566 1567 /** 1568 * Sets the period for the periodic notification event. 1569 * @param periodInFrames update period expressed in frames. 1570 * Zero period means no position updates. A negative period is not allowed. 1571 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION} 1572 */ 1573 public int setPositionNotificationPeriod(int periodInFrames) { 1574 if (mState == STATE_UNINITIALIZED) { 1575 return ERROR_INVALID_OPERATION; 1576 } 1577 return native_set_pos_update_period(periodInFrames); 1578 } 1579 1580 1581 /** 1582 * Sets the playback head position within the static buffer. 1583 * The track must be stopped or paused for the position to be changed, 1584 * and must use the {@link #MODE_STATIC} mode. 1585 * @param positionInFrames playback head position within buffer, expressed in frames. 1586 * Zero corresponds to start of buffer. 1587 * The position must not be greater than the buffer size in frames, or negative. 
1588 * Though this method and {@link #getPlaybackHeadPosition()} have similar names, 1589 * the position values have different meanings. 1590 * <br> 1591 * If looping is currently enabled and the new position is greater than or equal to the 1592 * loop end marker, the behavior varies by API level: 1593 * as of {@link android.os.Build.VERSION_CODES#M}, 1594 * the looping is first disabled and then the position is set. 1595 * For earlier API levels, the behavior is unspecified. 1596 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1597 * {@link #ERROR_INVALID_OPERATION} 1598 */ 1599 public int setPlaybackHeadPosition(int positionInFrames) { 1600 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 1601 getPlayState() == PLAYSTATE_PLAYING) { 1602 return ERROR_INVALID_OPERATION; 1603 } 1604 if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) { 1605 return ERROR_BAD_VALUE; 1606 } 1607 return native_set_position(positionInFrames); 1608 } 1609 1610 /** 1611 * Sets the loop points and the loop count. The loop can be infinite. 1612 * Similarly to setPlaybackHeadPosition, 1613 * the track must be stopped or paused for the loop points to be changed, 1614 * and must use the {@link #MODE_STATIC} mode. 1615 * @param startInFrames loop start marker expressed in frames. 1616 * Zero corresponds to start of buffer. 1617 * The start marker must not be greater than or equal to the buffer size in frames, or negative. 1618 * @param endInFrames loop end marker expressed in frames. 1619 * The total buffer size in frames corresponds to end of buffer. 1620 * The end marker must not be greater than the buffer size in frames. 1621 * For looping, the end marker must not be less than or equal to the start marker, 1622 * but to disable looping 1623 * it is permitted for start marker, end marker, and loop count to all be 0. 1624 * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}. 
1625 * If the loop period (endInFrames - startInFrames) is too small for the implementation to 1626 * support, 1627 * {@link #ERROR_BAD_VALUE} is returned. 1628 * The loop range is the interval [startInFrames, endInFrames). 1629 * <br> 1630 * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged, 1631 * unless it is greater than or equal to the loop end marker, in which case 1632 * it is forced to the loop start marker. 1633 * For earlier API levels, the effect on position is unspecified. 1634 * @param loopCount the number of times the loop is looped; must be greater than or equal to -1. 1635 * A value of -1 means infinite looping, and 0 disables looping. 1636 * A value of positive N means to "loop" (go back) N times. For example, 1637 * a value of one means to play the region two times in total. 1638 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1639 * {@link #ERROR_INVALID_OPERATION} 1640 */ 1641 public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) { 1642 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 1643 getPlayState() == PLAYSTATE_PLAYING) { 1644 return ERROR_INVALID_OPERATION; 1645 } 1646 if (loopCount == 0) { 1647 ; // explicitly allowed as an exception to the loop region range check 1648 } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames && 1649 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) { 1650 return ERROR_BAD_VALUE; 1651 } 1652 return native_set_loop(startInFrames, endInFrames, loopCount); 1653 } 1654 1655 /** 1656 * Sets the initialization state of the instance. This method was originally intended to be used 1657 * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state. 1658 * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete. 
1659 * @param state the state of the AudioTrack instance 1660 * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack. 1661 */ 1662 @Deprecated 1663 protected void setState(int state) { 1664 mState = state; 1665 } 1666 1667 1668 //--------------------------------------------------------- 1669 // Transport control methods 1670 //-------------------- 1671 /** 1672 * Starts playing an AudioTrack. 1673 * <p> 1674 * If track's creation mode is {@link #MODE_STATIC}, you must have called one of 1675 * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)}, 1676 * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)}, 1677 * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to 1678 * play(). 1679 * <p> 1680 * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to 1681 * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor). 1682 * If you don't call write() first, or if you call write() but with an insufficient amount of 1683 * data, then the track will be in underrun state at play(). In this case, 1684 * playback will not actually start playing until the data path is filled to a 1685 * device-specific minimum level. This requirement for the path to be filled 1686 * to a minimum level is also true when resuming audio playback after calling stop(). 1687 * Similarly the buffer will need to be filled up again after 1688 * the track underruns due to failure to call write() in a timely manner with sufficient data. 1689 * For portability, an application should prime the data path to the maximum allowed 1690 * by writing data until the write() method returns a short transfer count. 1691 * This allows play() to start immediately, and reduces the chance of underrun. 
1692 * 1693 * @throws IllegalStateException if the track isn't properly initialized 1694 */ 1695 public void play() 1696 throws IllegalStateException { 1697 if (mState != STATE_INITIALIZED) { 1698 throw new IllegalStateException("play() called on uninitialized AudioTrack."); 1699 } 1700 if (isRestricted()) { 1701 setVolume(0); 1702 } 1703 synchronized(mPlayStateLock) { 1704 native_start(); 1705 mPlayState = PLAYSTATE_PLAYING; 1706 } 1707 } 1708 1709 private boolean isRestricted() { 1710 if ((mAttributes.getAllFlags() & AudioAttributes.FLAG_BYPASS_INTERRUPTION_POLICY) != 0) { 1711 return false; 1712 } 1713 try { 1714 final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType); 1715 final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage, 1716 Process.myUid(), ActivityThread.currentPackageName()); 1717 return mode != AppOpsManager.MODE_ALLOWED; 1718 } catch (RemoteException e) { 1719 return false; 1720 } 1721 } 1722 1723 /** 1724 * Stops playing the audio data. 1725 * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing 1726 * after the last buffer that was written has been played. For an immediate stop, use 1727 * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played 1728 * back yet. 1729 * @throws IllegalStateException 1730 */ 1731 public void stop() 1732 throws IllegalStateException { 1733 if (mState != STATE_INITIALIZED) { 1734 throw new IllegalStateException("stop() called on uninitialized AudioTrack."); 1735 } 1736 1737 // stop playing 1738 synchronized(mPlayStateLock) { 1739 native_stop(); 1740 mPlayState = PLAYSTATE_STOPPED; 1741 mAvSyncHeader = null; 1742 mAvSyncBytesRemaining = 0; 1743 } 1744 } 1745 1746 /** 1747 * Pauses the playback of the audio data. Data that has not been played 1748 * back will not be discarded. Subsequent calls to {@link #play} will play 1749 * this data back. See {@link #flush()} to discard this data. 
1750 * 1751 * @throws IllegalStateException 1752 */ 1753 public void pause() 1754 throws IllegalStateException { 1755 if (mState != STATE_INITIALIZED) { 1756 throw new IllegalStateException("pause() called on uninitialized AudioTrack."); 1757 } 1758 //logd("pause()"); 1759 1760 // pause playback 1761 synchronized(mPlayStateLock) { 1762 native_pause(); 1763 mPlayState = PLAYSTATE_PAUSED; 1764 } 1765 } 1766 1767 1768 //--------------------------------------------------------- 1769 // Audio data supply 1770 //-------------------- 1771 1772 /** 1773 * Flushes the audio data currently queued for playback. Any data that has 1774 * been written but not yet presented will be discarded. No-op if not stopped or paused, 1775 * or if the track's creation mode is not {@link #MODE_STREAM}. 1776 * <BR> Note that although data written but not yet presented is discarded, there is no 1777 * guarantee that all of the buffer space formerly used by that data 1778 * is available for a subsequent write. 1779 * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code> 1780 * less than or equal to the total buffer size 1781 * may return a short actual transfer count. 1782 */ 1783 public void flush() { 1784 if (mState == STATE_INITIALIZED) { 1785 // flush the data in native layer 1786 native_flush(); 1787 mAvSyncHeader = null; 1788 mAvSyncBytesRemaining = 0; 1789 } 1790 1791 } 1792 1793 /** 1794 * Writes the audio data to the audio sink for playback (streaming mode), 1795 * or copies audio data for later playback (static buffer mode). 1796 * The format specified in the AudioTrack constructor should be 1797 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 1798 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 1799 * <p> 1800 * In streaming mode, the write will normally block until all the data has been enqueued for 1801 * playback, and will return a full transfer count. 
However, if the track is stopped or paused 1802 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1803 * occurs during the write, then the write may return a short transfer count. 1804 * <p> 1805 * In static buffer mode, copies the data to the buffer starting at offset 0. 1806 * Note that the actual playback of this data might occur after this function returns. 1807 * 1808 * @param audioData the array that holds the data to play. 1809 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 1810 * starts. 1811 * Must not be negative, or cause the data access to go out of bounds of the array. 1812 * @param sizeInBytes the number of bytes to write in audioData after the offset. 1813 * Must not be negative, or cause the data access to go out of bounds of the array. 1814 * @return zero or the positive number of bytes that were written, or 1815 * {@link #ERROR_INVALID_OPERATION} 1816 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1817 * the parameters don't resolve to valid data and indexes, or 1818 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1819 * needs to be recreated. 1820 * The dead object error code is not returned if some data was successfully transferred. 1821 * In this case, the error is returned at the next write(). 1822 * The number of bytes will be a multiple of the frame size in bytes 1823 * not to exceed sizeInBytes. 1824 * 1825 * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code> 1826 * set to {@link #WRITE_BLOCKING}. 1827 */ 1828 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) { 1829 return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING); 1830 } 1831 1832 /** 1833 * Writes the audio data to the audio sink for playback (streaming mode), 1834 * or copies audio data for later playback (static buffer mode). 
1835 * The format specified in the AudioTrack constructor should be 1836 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 1837 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 1838 * <p> 1839 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 1840 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1841 * for playback, and will return a full transfer count. However, if the write mode is 1842 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1843 * interrupts the write by calling stop or pause, or an I/O error 1844 * occurs during the write, then the write may return a short transfer count. 1845 * <p> 1846 * In static buffer mode, copies the data to the buffer starting at offset 0, 1847 * and the write mode is ignored. 1848 * Note that the actual playback of this data might occur after this function returns. 1849 * 1850 * @param audioData the array that holds the data to play. 1851 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 1852 * starts. 1853 * Must not be negative, or cause the data access to go out of bounds of the array. 1854 * @param sizeInBytes the number of bytes to write in audioData after the offset. 1855 * Must not be negative, or cause the data access to go out of bounds of the array. 1856 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1857 * effect in static mode. 1858 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1859 * to the audio sink. 1860 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1861 * queuing as much audio data for playback as possible without blocking. 
1862 * @return zero or the positive number of bytes that were written, or 1863 * {@link #ERROR_INVALID_OPERATION} 1864 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1865 * the parameters don't resolve to valid data and indexes, or 1866 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1867 * needs to be recreated. 1868 * The dead object error code is not returned if some data was successfully transferred. 1869 * In this case, the error is returned at the next write(). 1870 * The number of bytes will be a multiple of the frame size in bytes 1871 * not to exceed sizeInBytes. 1872 */ 1873 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes, 1874 @WriteMode int writeMode) { 1875 1876 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1877 return ERROR_INVALID_OPERATION; 1878 } 1879 1880 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1881 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1882 return ERROR_BAD_VALUE; 1883 } 1884 1885 if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0) 1886 || (offsetInBytes + sizeInBytes < 0) // detect integer overflow 1887 || (offsetInBytes + sizeInBytes > audioData.length)) { 1888 return ERROR_BAD_VALUE; 1889 } 1890 1891 int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat, 1892 writeMode == WRITE_BLOCKING); 1893 1894 if ((mDataLoadMode == MODE_STATIC) 1895 && (mState == STATE_NO_STATIC_DATA) 1896 && (ret > 0)) { 1897 // benign race with respect to other APIs that read mState 1898 mState = STATE_INITIALIZED; 1899 } 1900 1901 return ret; 1902 } 1903 1904 /** 1905 * Writes the audio data to the audio sink for playback (streaming mode), 1906 * or copies audio data for later playback (static buffer mode). 
1907 * The format specified in the AudioTrack constructor should be 1908 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1909 * <p> 1910 * In streaming mode, the write will normally block until all the data has been enqueued for 1911 * playback, and will return a full transfer count. However, if the track is stopped or paused 1912 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 1913 * occurs during the write, then the write may return a short transfer count. 1914 * <p> 1915 * In static buffer mode, copies the data to the buffer starting at offset 0. 1916 * Note that the actual playback of this data might occur after this function returns. 1917 * 1918 * @param audioData the array that holds the data to play. 1919 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 1920 * starts. 1921 * Must not be negative, or cause the data access to go out of bounds of the array. 1922 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1923 * Must not be negative, or cause the data access to go out of bounds of the array. 1924 * @return zero or the positive number of shorts that were written, or 1925 * {@link #ERROR_INVALID_OPERATION} 1926 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1927 * the parameters don't resolve to valid data and indexes, or 1928 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1929 * needs to be recreated. 1930 * The dead object error code is not returned if some data was successfully transferred. 1931 * In this case, the error is returned at the next write(). 1932 * The number of shorts will be a multiple of the channel count not to exceed sizeInShorts. 1933 * 1934 * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code> 1935 * set to {@link #WRITE_BLOCKING}. 
1936 */ 1937 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) { 1938 return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING); 1939 } 1940 1941 /** 1942 * Writes the audio data to the audio sink for playback (streaming mode), 1943 * or copies audio data for later playback (static buffer mode). 1944 * The format specified in the AudioTrack constructor should be 1945 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1946 * <p> 1947 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 1948 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 1949 * for playback, and will return a full transfer count. However, if the write mode is 1950 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 1951 * interrupts the write by calling stop or pause, or an I/O error 1952 * occurs during the write, then the write may return a short transfer count. 1953 * <p> 1954 * In static buffer mode, copies the data to the buffer starting at offset 0. 1955 * Note that the actual playback of this data might occur after this function returns. 1956 * 1957 * @param audioData the array that holds the data to write. 1958 * @param offsetInShorts the offset expressed in shorts in audioData where the data to write 1959 * starts. 1960 * Must not be negative, or cause the data access to go out of bounds of the array. 1961 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1962 * Must not be negative, or cause the data access to go out of bounds of the array. 1963 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1964 * effect in static mode. 1965 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1966 * to the audio sink. 
1967 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1968 * queuing as much audio data for playback as possible without blocking. 1969 * @return zero or the positive number of shorts that were written, or 1970 * {@link #ERROR_INVALID_OPERATION} 1971 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1972 * the parameters don't resolve to valid data and indexes, or 1973 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1974 * needs to be recreated. 1975 * The dead object error code is not returned if some data was successfully transferred. 1976 * In this case, the error is returned at the next write(). 1977 * The number of shorts will be a multiple of the channel count not to exceed sizeInShorts. 1978 */ 1979 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts, 1980 @WriteMode int writeMode) { 1981 1982 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1983 return ERROR_INVALID_OPERATION; 1984 } 1985 1986 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1987 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1988 return ERROR_BAD_VALUE; 1989 } 1990 1991 if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0) 1992 || (offsetInShorts + sizeInShorts < 0) // detect integer overflow 1993 || (offsetInShorts + sizeInShorts > audioData.length)) { 1994 return ERROR_BAD_VALUE; 1995 } 1996 1997 int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat, 1998 writeMode == WRITE_BLOCKING); 1999 2000 if ((mDataLoadMode == MODE_STATIC) 2001 && (mState == STATE_NO_STATIC_DATA) 2002 && (ret > 0)) { 2003 // benign race with respect to other APIs that read mState 2004 mState = STATE_INITIALIZED; 2005 } 2006 2007 return ret; 2008 } 2009 2010 /** 2011 * Writes the audio data to the audio sink for playback (streaming mode), 2012 * or copies audio data 
for later playback (static buffer mode). 2013 * The format specified in the AudioTrack constructor should be 2014 * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array. 2015 * <p> 2016 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2017 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2018 * for playback, and will return a full transfer count. However, if the write mode is 2019 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2020 * interrupts the write by calling stop or pause, or an I/O error 2021 * occurs during the write, then the write may return a short transfer count. 2022 * <p> 2023 * In static buffer mode, copies the data to the buffer starting at offset 0, 2024 * and the write mode is ignored. 2025 * Note that the actual playback of this data might occur after this function returns. 2026 * 2027 * @param audioData the array that holds the data to write. 2028 * The implementation does not clip for sample values within the nominal range 2029 * [-1.0f, 1.0f], provided that all gains in the audio pipeline are 2030 * less than or equal to unity (1.0f), and in the absence of post-processing effects 2031 * that could add energy, such as reverb. For the convenience of applications 2032 * that compute samples using filters with non-unity gain, 2033 * sample values +3 dB beyond the nominal range are permitted. 2034 * However such values may eventually be limited or clipped, depending on various gains 2035 * and later processing in the audio path. Therefore applications are encouraged 2036 * to provide samples values within the nominal range. 2037 * @param offsetInFloats the offset, expressed as a number of floats, 2038 * in audioData where the data to write starts. 2039 * Must not be negative, or cause the data access to go out of bounds of the array. 
2040 * @param sizeInFloats the number of floats to write in audioData after the offset. 2041 * Must not be negative, or cause the data access to go out of bounds of the array. 2042 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2043 * effect in static mode. 2044 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2045 * to the audio sink. 2046 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2047 * queuing as much audio data for playback as possible without blocking. 2048 * @return zero or the positive number of floats that were written, or 2049 * {@link #ERROR_INVALID_OPERATION} 2050 * if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if 2051 * the parameters don't resolve to valid data and indexes, or 2052 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2053 * needs to be recreated. 2054 * The dead object error code is not returned if some data was successfully transferred. 2055 * In this case, the error is returned at the next write(). 2056 * The number of floats will be a multiple of the channel count not to exceed sizeInFloats. 2057 */ 2058 public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats, 2059 @WriteMode int writeMode) { 2060 2061 if (mState == STATE_UNINITIALIZED) { 2062 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2063 return ERROR_INVALID_OPERATION; 2064 } 2065 2066 if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) { 2067 Log.e(TAG, "AudioTrack.write(float[] ...) 
requires format ENCODING_PCM_FLOAT"); 2068 return ERROR_INVALID_OPERATION; 2069 } 2070 2071 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2072 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2073 return ERROR_BAD_VALUE; 2074 } 2075 2076 if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0) 2077 || (offsetInFloats + sizeInFloats < 0) // detect integer overflow 2078 || (offsetInFloats + sizeInFloats > audioData.length)) { 2079 Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size"); 2080 return ERROR_BAD_VALUE; 2081 } 2082 2083 int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat, 2084 writeMode == WRITE_BLOCKING); 2085 2086 if ((mDataLoadMode == MODE_STATIC) 2087 && (mState == STATE_NO_STATIC_DATA) 2088 && (ret > 0)) { 2089 // benign race with respect to other APIs that read mState 2090 mState = STATE_INITIALIZED; 2091 } 2092 2093 return ret; 2094 } 2095 2096 2097 /** 2098 * Writes the audio data to the audio sink for playback (streaming mode), 2099 * or copies audio data for later playback (static buffer mode). 2100 * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor. 2101 * <p> 2102 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2103 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2104 * for playback, and will return a full transfer count. However, if the write mode is 2105 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2106 * interrupts the write by calling stop or pause, or an I/O error 2107 * occurs during the write, then the write may return a short transfer count. 2108 * <p> 2109 * In static buffer mode, copies the data to the buffer starting at offset 0, 2110 * and the write mode is ignored. 
2111 * Note that the actual playback of this data might occur after this function returns. 2112 * 2113 * @param audioData the buffer that holds the data to write, starting at the position reported 2114 * by <code>audioData.position()</code>. 2115 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 2116 * have been advanced to reflect the amount of data that was successfully written to 2117 * the AudioTrack. 2118 * @param sizeInBytes number of bytes to write. It is recommended but not enforced 2119 * that the number of bytes requested be a multiple of the frame size (sample size in 2120 * bytes multiplied by the channel count). 2121 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 2122 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2123 * effect in static mode. 2124 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2125 * to the audio sink. 2126 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2127 * queuing as much audio data for playback as possible without blocking. 2128 * @return zero or the positive number of bytes that were written, or 2129 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or 2130 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2131 * needs to be recreated. 2132 * The dead object error code is not returned if some data was successfully transferred. 2133 * In this case, the error is returned at the next write(). 
2134 */ 2135 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 2136 @WriteMode int writeMode) { 2137 2138 if (mState == STATE_UNINITIALIZED) { 2139 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2140 return ERROR_INVALID_OPERATION; 2141 } 2142 2143 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2144 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2145 return ERROR_BAD_VALUE; 2146 } 2147 2148 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 2149 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 2150 return ERROR_BAD_VALUE; 2151 } 2152 2153 int ret = 0; 2154 if (audioData.isDirect()) { 2155 ret = native_write_native_bytes(audioData, 2156 audioData.position(), sizeInBytes, mAudioFormat, 2157 writeMode == WRITE_BLOCKING); 2158 } else { 2159 ret = native_write_byte(NioUtils.unsafeArray(audioData), 2160 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 2161 sizeInBytes, mAudioFormat, 2162 writeMode == WRITE_BLOCKING); 2163 } 2164 2165 if ((mDataLoadMode == MODE_STATIC) 2166 && (mState == STATE_NO_STATIC_DATA) 2167 && (ret > 0)) { 2168 // benign race with respect to other APIs that read mState 2169 mState = STATE_INITIALIZED; 2170 } 2171 2172 if (ret > 0) { 2173 audioData.position(audioData.position() + ret); 2174 } 2175 2176 return ret; 2177 } 2178 2179 /** 2180 * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track. 2181 * The blocking behavior will depend on the write mode. 2182 * @param audioData the buffer that holds the data to write, starting at the position reported 2183 * by <code>audioData.position()</code>. 2184 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 2185 * have been advanced to reflect the amount of data that was successfully written to 2186 * the AudioTrack. 
     * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
     *     that the number of bytes requested be a multiple of the frame size (sample size in
     *     bytes multiplied by the channel count).
     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *         to the audio sink.
     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
     * @return zero or a positive number of bytes that were written, or
     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
     *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *     needs to be recreated.
     *     The dead object error code is not returned if some data was successfully transferred.
     *     In this case, the error is returned at the next write().
     */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        // Reject calls made before the native track was successfully created.
        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes only make sense for streaming tracks.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // On a track without FLAG_HW_AV_SYNC, fall back to the plain write() and drop the pts.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists.
        // The 16-byte header is written big-endian: a 0x55550001 marker, the payload size of
        // this access unit, and the 64-bit timestamp. A non-null mAvSyncHeader marks an access
        // unit in progress across calls, and mAvSyncBytesRemaining tracks how much of the
        // announced payload has not yet been delivered.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(16);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550001);
            mAvSyncHeader.putInt(sizeInBytes);
            mAvSyncHeader.putLong(timestamp);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // Abandon the whole access unit on a header write error.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Non-blocking write could not fit the whole header: report zero payload
                // bytes written; the header is completed on a subsequent call.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more payload than the size announced in the header.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;
        // Access unit fully delivered: the next call starts a fresh header.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader = null;
        }

        return ret;
    }


    /**
     * Sets the playback head position within the static buffer to zero,
     * that is it rewinds to start of static buffer.
     * The track must be stopped or paused, and
     * the track's creation mode must be {@link #MODE_STATIC}.
     * <p>
     * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
     * {@link #getPlaybackHeadPosition()} to zero.
     * For earlier API levels, the reset behavior is unspecified.
     * <p>
     * Use {@link #setPlaybackHeadPosition(int)} with a zero position
     * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int reloadStaticData() {
        // Only valid for a fully initialized MODE_STATIC track.
        if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_reload_static();
    }

    //--------------------------------------------------------------------------
    // Audio effects management
    //--------------------

    /**
     * Attaches an auxiliary effect to the audio track. A typical auxiliary
     * effect is a reverberation effect which can be applied on any sound source
     * that directs a certain amount of its energy to this effect. This amount
     * is defined by setAuxEffectSendLevel().
     * {@see #setAuxEffectSendLevel(float)}.
2310 * <p>After creating an auxiliary effect (e.g. 2311 * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with 2312 * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling 2313 * this method to attach the audio track to the effect. 2314 * <p>To detach the effect from the audio track, call this method with a 2315 * null effect id. 2316 * 2317 * @param effectId system wide unique id of the effect to attach 2318 * @return error code or success, see {@link #SUCCESS}, 2319 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE} 2320 */ 2321 public int attachAuxEffect(int effectId) { 2322 if (mState == STATE_UNINITIALIZED) { 2323 return ERROR_INVALID_OPERATION; 2324 } 2325 return native_attachAuxEffect(effectId); 2326 } 2327 2328 /** 2329 * Sets the send level of the audio track to the attached auxiliary effect 2330 * {@link #attachAuxEffect(int)}. Effect levels 2331 * are clamped to the closed interval [0.0, max] where 2332 * max is the value of {@link #getMaxVolume}. 2333 * A value of 0.0 results in no effect, and a value of 1.0 is full send. 2334 * <p>By default the send level is 0.0f, so even if an effect is attached to the player 2335 * this method must be called for the effect to be applied. 2336 * <p>Note that the passed level value is a linear scalar. 
UI controls should be scaled 2337 * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB, 2338 * so an appropriate conversion from linear UI input x to level is: 2339 * x == 0 -> level = 0 2340 * 0 < x <= R -> level = 10^(72*(x-R)/20/R) 2341 * 2342 * @param level linear send level 2343 * @return error code or success, see {@link #SUCCESS}, 2344 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR} 2345 */ 2346 public int setAuxEffectSendLevel(float level) { 2347 if (isRestricted()) { 2348 return SUCCESS; 2349 } 2350 if (mState == STATE_UNINITIALIZED) { 2351 return ERROR_INVALID_OPERATION; 2352 } 2353 level = clampGainOrLevel(level); 2354 int err = native_setAuxEffectSendLevel(level); 2355 return err == 0 ? SUCCESS : ERROR; 2356 } 2357 2358 //-------------------------------------------------------------------------- 2359 // Explicit Routing 2360 //-------------------- 2361 private AudioDeviceInfo mPreferredDevice = null; 2362 2363 /** 2364 * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route 2365 * the output from this AudioTrack. 2366 * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink. 2367 * If deviceInfo is null, default routing is restored. 2368 * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and 2369 * does not correspond to a valid audio output device. 2370 */ 2371 public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) { 2372 // Do some validation.... 2373 if (deviceInfo != null && !deviceInfo.isSink()) { 2374 return false; 2375 } 2376 int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0; 2377 boolean status = native_setOutputDevice(preferredDeviceId); 2378 if (status == true) { 2379 synchronized (this) { 2380 mPreferredDevice = deviceInfo; 2381 } 2382 } 2383 return status; 2384 } 2385 2386 /** 2387 * Returns the selected output specified by {@link #setPreferredDevice}. 
Note that this 2388 * is not guaranteed to correspond to the actual device being used for playback. 2389 */ 2390 public AudioDeviceInfo getPreferredDevice() { 2391 synchronized (this) { 2392 return mPreferredDevice; 2393 } 2394 } 2395 2396 /** 2397 * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack. 2398 * Note: The query is only valid if the AudioTrack is currently playing. If it is not, 2399 * <code>getRoutedDevice()</code> will return null. 2400 */ 2401 public AudioDeviceInfo getRoutedDevice() { 2402 int deviceId = native_getRoutedDeviceId(); 2403 if (deviceId == 0) { 2404 return null; 2405 } 2406 AudioDeviceInfo[] devices = 2407 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS); 2408 for (int i = 0; i < devices.length; i++) { 2409 if (devices[i].getId() == deviceId) { 2410 return devices[i]; 2411 } 2412 } 2413 return null; 2414 } 2415 2416 /* 2417 * Call BEFORE adding a routing callback handler. 2418 */ 2419 private void testEnableNativeRoutingCallbacks() { 2420 if (mRoutingChangeListeners.size() == 0 && mNewRoutingChangeListeners.size() == 0) { 2421 native_enableDeviceCallback(); 2422 } 2423 } 2424 2425 /* 2426 * Call AFTER removing a routing callback handler. 2427 */ 2428 private void testDisableNativeRoutingCallbacks() { 2429 if (mRoutingChangeListeners.size() == 0 && mNewRoutingChangeListeners.size() == 0) { 2430 native_disableDeviceCallback(); 2431 } 2432 } 2433 2434 //-------------------------------------------------------------------------- 2435 // >= "N" (Re)Routing Info 2436 //-------------------- 2437 /** 2438 * The list of AudioRouting.OnRoutingChangedListener interfaces added (with 2439 * {@link AudioTrack#addOnRoutingListener(AudioRouting.OnRoutingChangedListener, 2440 * android.os.Handler)} 2441 * by an app to receive (re)routing notifications. 
2442 */ 2443 private ArrayMap<AudioRouting.OnRoutingChangedListener, NativeNewRoutingEventHandlerDelegate> 2444 mNewRoutingChangeListeners = 2445 new ArrayMap<AudioRouting.OnRoutingChangedListener, NativeNewRoutingEventHandlerDelegate>(); 2446 2447 /** 2448 * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing 2449 * changes on this AudioTrack. 2450 * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive 2451 * notifications of rerouting events. 2452 * @param handler Specifies the {@link Handler} object for the thread on which to execute 2453 * the callback. If <code>null</code>, the {@link Handler} associated with the main 2454 * {@link Looper} will be used. 2455 */ 2456 public void addOnRoutingListener(AudioRouting.OnRoutingChangedListener listener, 2457 Handler handler) { 2458 if (listener != null && !mNewRoutingChangeListeners.containsKey(listener)) { 2459 synchronized (mNewRoutingChangeListeners) { 2460 testEnableNativeRoutingCallbacks(); 2461 mNewRoutingChangeListeners.put( 2462 listener, new NativeNewRoutingEventHandlerDelegate(this, listener, 2463 handler != null ? handler : new Handler(mInitializationLooper))); 2464 } 2465 } 2466 } 2467 2468 /** 2469 * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added 2470 * to receive rerouting notifications. 2471 * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface 2472 * to remove. 
2473 */ 2474 public void removeOnRoutingListener(AudioRouting.OnRoutingChangedListener listener) { 2475 if (mNewRoutingChangeListeners.containsKey(listener)) { 2476 mNewRoutingChangeListeners.remove(listener); 2477 } 2478 testDisableNativeRoutingCallbacks(); 2479 } 2480 2481 //-------------------------------------------------------------------------- 2482 // Marshmallow (Re)Routing Info 2483 //-------------------- 2484 /** 2485 * Defines the interface by which applications can receive notifications of routing 2486 * changes for the associated {@link AudioTrack}. 2487 */ 2488 @Deprecated 2489 public interface OnRoutingChangedListener { 2490 /** 2491 * Called when the routing of an AudioTrack changes from either and explicit or 2492 * policy rerouting. Use {@link #getRoutedDevice()} to retrieve the newly routed-to 2493 * device. 2494 */ 2495 @Deprecated 2496 public void onRoutingChanged(AudioTrack audioTrack); 2497 } 2498 2499 /** 2500 * The list of AudioTrack.OnRoutingChangedListener interfaces added (with 2501 * {@link AudioTrack#addOnRoutingChangedListener(OnRoutingChangedListener, android.os.Handler)} 2502 * by an app to receive (re)routing notifications. 2503 */ 2504 private ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate> 2505 mRoutingChangeListeners = 2506 new ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate>(); 2507 2508 /** 2509 * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes 2510 * on this AudioTrack. 2511 * @param listener The {@link OnRoutingChangedListener} interface to receive notifications 2512 * of rerouting events. 2513 * @param handler Specifies the {@link Handler} object for the thread on which to execute 2514 * the callback. If <code>null</code>, the {@link Handler} associated with the main 2515 * {@link Looper} will be used. 
2516 */ 2517 @Deprecated 2518 public void addOnRoutingChangedListener(OnRoutingChangedListener listener, 2519 android.os.Handler handler) { 2520 if (listener != null && !mRoutingChangeListeners.containsKey(listener)) { 2521 synchronized (mRoutingChangeListeners) { 2522 testEnableNativeRoutingCallbacks(); 2523 mRoutingChangeListeners.put( 2524 listener, new NativeRoutingEventHandlerDelegate(this, listener, 2525 handler != null ? handler : new Handler(mInitializationLooper))); 2526 } 2527 } 2528 } 2529 2530 /** 2531 * Removes an {@link OnRoutingChangedListener} which has been previously added 2532 * to receive rerouting notifications. 2533 * @param listener The previously added {@link OnRoutingChangedListener} interface to remove. 2534 */ 2535 @Deprecated 2536 public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) { 2537 synchronized (mRoutingChangeListeners) { 2538 if (mRoutingChangeListeners.containsKey(listener)) { 2539 mRoutingChangeListeners.remove(listener); 2540 } 2541 testDisableNativeRoutingCallbacks(); 2542 } 2543 } 2544 2545 /** 2546 * Sends device list change notification to all listeners. 
2547 */ 2548 private void broadcastRoutingChange() { 2549 AudioManager.resetAudioPortGeneration(); 2550 2551 // Marshmallow Routing 2552 Collection<NativeRoutingEventHandlerDelegate> values; 2553 synchronized (mRoutingChangeListeners) { 2554 values = mRoutingChangeListeners.values(); 2555 } 2556 for(NativeRoutingEventHandlerDelegate delegate : values) { 2557 Handler handler = delegate.getHandler(); 2558 if (handler != null) { 2559 handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE); 2560 } 2561 } 2562 // >= "N" Routing 2563 Collection<NativeNewRoutingEventHandlerDelegate> newValues; 2564 synchronized (mNewRoutingChangeListeners) { 2565 newValues = mNewRoutingChangeListeners.values(); 2566 } 2567 for(NativeNewRoutingEventHandlerDelegate delegate : newValues) { 2568 Handler handler = delegate.getHandler(); 2569 if (handler != null) { 2570 handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE); 2571 } 2572 } 2573 } 2574 2575 //--------------------------------------------------------- 2576 // Interface definitions 2577 //-------------------- 2578 /** 2579 * Interface definition for a callback to be invoked when the playback head position of 2580 * an AudioTrack has reached a notification marker or has increased by a certain period. 2581 */ 2582 public interface OnPlaybackPositionUpdateListener { 2583 /** 2584 * Called on the listener to notify it that the previously set marker has been reached 2585 * by the playback head. 2586 */ 2587 void onMarkerReached(AudioTrack track); 2588 2589 /** 2590 * Called on the listener to periodically notify it that the playback head has reached 2591 * a multiple of the notification period. 
2592 */ 2593 void onPeriodicNotification(AudioTrack track); 2594 } 2595 2596 //--------------------------------------------------------- 2597 // Inner classes 2598 //-------------------- 2599 /** 2600 * Helper class to handle the forwarding of native events to the appropriate listener 2601 * (potentially) handled in a different thread 2602 */ 2603 private class NativePositionEventHandlerDelegate { 2604 private final Handler mHandler; 2605 2606 NativePositionEventHandlerDelegate(final AudioTrack track, 2607 final OnPlaybackPositionUpdateListener listener, 2608 Handler handler) { 2609 // find the looper for our new event handler 2610 Looper looper; 2611 if (handler != null) { 2612 looper = handler.getLooper(); 2613 } else { 2614 // no given handler, use the looper the AudioTrack was created in 2615 looper = mInitializationLooper; 2616 } 2617 2618 // construct the event handler with this looper 2619 if (looper != null) { 2620 // implement the event handler delegate 2621 mHandler = new Handler(looper) { 2622 @Override 2623 public void handleMessage(Message msg) { 2624 if (track == null) { 2625 return; 2626 } 2627 switch(msg.what) { 2628 case NATIVE_EVENT_MARKER: 2629 if (listener != null) { 2630 listener.onMarkerReached(track); 2631 } 2632 break; 2633 case NATIVE_EVENT_NEW_POS: 2634 if (listener != null) { 2635 listener.onPeriodicNotification(track); 2636 } 2637 break; 2638 default: 2639 loge("Unknown native event type: " + msg.what); 2640 break; 2641 } 2642 } 2643 }; 2644 } else { 2645 mHandler = null; 2646 } 2647 } 2648 2649 Handler getHandler() { 2650 return mHandler; 2651 } 2652 } 2653 2654 /** 2655 * Marshmallow Routing API. 
2656 * Helper class to handle the forwarding of native events to the appropriate listener 2657 * (potentially) handled in a different thread 2658 */ 2659 private class NativeRoutingEventHandlerDelegate { 2660 private final Handler mHandler; 2661 2662 NativeRoutingEventHandlerDelegate(final AudioTrack track, 2663 final OnRoutingChangedListener listener, 2664 Handler handler) { 2665 // find the looper for our new event handler 2666 Looper looper; 2667 if (handler != null) { 2668 looper = handler.getLooper(); 2669 } else { 2670 // no given handler, use the looper the AudioTrack was created in 2671 looper = mInitializationLooper; 2672 } 2673 2674 // construct the event handler with this looper 2675 if (looper != null) { 2676 // implement the event handler delegate 2677 mHandler = new Handler(looper) { 2678 @Override 2679 public void handleMessage(Message msg) { 2680 if (track == null) { 2681 return; 2682 } 2683 switch(msg.what) { 2684 case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE: 2685 if (listener != null) { 2686 listener.onRoutingChanged(track); 2687 } 2688 break; 2689 default: 2690 loge("Unknown native event type: " + msg.what); 2691 break; 2692 } 2693 } 2694 }; 2695 } else { 2696 mHandler = null; 2697 } 2698 } 2699 2700 Handler getHandler() { 2701 return mHandler; 2702 } 2703 } 2704 2705 /** 2706 * Marshmallow Routing API. 
     * Helper class to handle the forwarding of native events to the appropriate listener
     * (potentially) handled in a different thread
     */
    private class NativeNewRoutingEventHandlerDelegate {
        // Handler that delivers routing-change events on the chosen looper; null when no
        // looper was available at construction time.
        private final Handler mHandler;

        NativeNewRoutingEventHandlerDelegate(final AudioTrack track,
                                   final AudioRouting.OnRoutingChangedListener listener,
                                   Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                        case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE:
                            if (listener != null) {
                                listener.onRoutingChanged(track);
                            }
                            break;
                        default:
                            loge("Unknown native event type: " + msg.what);
                            break;
                        }
                    }
                };
            } else {
                // Without a looper there is no thread to deliver events on.
                mHandler = null;
            }
        }

        Handler getHandler() {
            return mHandler;
        }
    }

    //---------------------------------------------------------
    // Java methods called from the native side
    //--------------------
    // Entry point invoked by the native layer (via JNI) to deliver track events.
    // Routing changes are broadcast to the routing listeners directly; all other events
    // are forwarded to the position-event delegate's handler, if one is installed.
    @SuppressWarnings("unused")
    private static void postEventFromNative(Object audiotrack_ref,
            int what, int arg1, int arg2, Object obj) {
        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
        // The native side holds only a weak reference; the track may already be collected.
        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
        if (track == null) {
            return;
        }

        if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
            track.broadcastRoutingChange();
            return;
        }
        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
        if (delegate != null) {
            Handler handler = delegate.getHandler();
            if (handler != null) {
                Message m = handler.obtainMessage(what, arg1, arg2, obj);
                handler.sendMessage(m);
            }
        }
    }


    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    // post-condition: mStreamType is overwritten with a value
    // that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    // AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack);

    private native final void native_finalize();

    /**
     * @hide
     */
    public native final void native_release();

    // Transport control.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Data transfer for the byte[], short[], float[] and direct-ByteBuffer write() variants.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    // Buffer sizing queries/updates, in frames.
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Position marker and periodic-notification configuration.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effect attach/level (see attachAuxEffect / setAuxEffectSendLevel).
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Explicit routing and device-callback control.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    static private native int native_get_FCC_8();

    //---------------------------------------------------------
    // Utility methods
    //------------------

    // Debug-level log helper, tagged with this class's TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Error-level log helper, tagged with this class's TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
}