AudioTrack.java revision 35ba5ab4503f4818f0dd131ad1d2bf91f7397cad
1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17package android.media; 18 19import java.lang.annotation.Retention; 20import java.lang.annotation.RetentionPolicy; 21import java.lang.ref.WeakReference; 22import java.nio.ByteBuffer; 23import java.nio.NioUtils; 24 25import android.annotation.IntDef; 26import android.annotation.NonNull; 27import android.annotation.SystemApi; 28import android.app.ActivityThread; 29import android.app.AppOpsManager; 30import android.content.Context; 31import android.os.Handler; 32import android.os.IBinder; 33import android.os.Looper; 34import android.os.Message; 35import android.os.Process; 36import android.os.RemoteException; 37import android.os.ServiceManager; 38import android.util.Log; 39 40import com.android.internal.app.IAppOpsService; 41 42 43/** 44 * The AudioTrack class manages and plays a single audio resource for Java applications. 45 * It allows streaming of PCM audio buffers to the audio sink for playback. This is 46 * achieved by "pushing" the data to the AudioTrack object using one of the 47 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)}, 48 * and {@link #write(float[], int, int, int)} methods. 49 * 50 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br> 51 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using 52 * one of the {@code write()} methods. 
These are blocking and return when the data has been 53 * transferred from the Java layer to the native layer and queued for playback. The streaming 54 * mode is most useful when playing blocks of audio data that for instance are: 55 * 56 * <ul> 57 * <li>too big to fit in memory because of the duration of the sound to play,</li> 58 * <li>too big to fit in memory because of the characteristics of the audio data 59 * (high sampling rate, bits per sample ...)</li> 60 * <li>received or generated while previously queued audio is playing.</li> 61 * </ul> 62 * 63 * The static mode should be chosen when dealing with short sounds that fit in memory and 64 * that need to be played with the smallest latency possible. The static mode will 65 * therefore be preferred for UI and game sounds that are played often, and with the 66 * smallest overhead possible. 67 * 68 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer. 69 * The size of this buffer, specified during the construction, determines how long an AudioTrack 70 * can play before running out of data.<br> 71 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can 72 * be played from it.<br> 73 * For the streaming mode, data will be written to the audio sink in chunks of 74 * sizes less than or equal to the total buffer size. 75 * 76 * AudioTrack is not final and thus permits subclasses, but such use is not recommended. 77 */ 78public class AudioTrack 79{ 80 //--------------------------------------------------------- 81 // Constants 82 //-------------------- 83 /** Minimum value for a linear gain or auxiliary effect level. 84 * This value must be exactly equal to 0.0f; do not change it. 85 */ 86 private static final float GAIN_MIN = 0.0f; 87 /** Maximum value for a linear gain or auxiliary effect level. 88 * This value must be greater than or equal to 1.0f. 
     */
    private static final float GAIN_MAX = 1.0f;

    /** Minimum value for sample rate */
    private static final int SAMPLE_RATE_HZ_MIN = 4000;
    /** Maximum value for sample rate */
    private static final int SAMPLE_RATE_HZ_MAX = 96000;

    /** Maximum value for AudioTrack channel count */
    private static final int CHANNEL_COUNT_MAX = 8;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1; // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED = 2; // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3; // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public static final int SUCCESS = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public static final int ERROR = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public static final int ERROR_BAD_VALUE = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public static final int ERROR_INVALID_OPERATION = AudioSystem.INVALID_OPERATION;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used in {@link #write(ByteBuffer, int, int)}
     */
    public final static int WRITE_BLOCKING = 0;
    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking, to be used in
     * {@link #write(ByteBuffer, int, int)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to make sure mPlayState updates are reflecting the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativeEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     */
    private int mSampleRate; // initialized by all constructors
    /**
     * The number of audio output channels (1 is mono, 2 is stereo).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask.
     */
    private int mChannels = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     * {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     * {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    private final AudioAttributes mAttributes;
    /**
     * The way audio is consumed by the audio sink, streaming or static.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current audio channel configuration.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
    /**
     * Audio session ID
     */
    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
    /**
     * Reference to the app-ops service.
     */
    private final IAppOpsService mAppOps;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    private long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;


    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback.
     *   If track's creation mode is {@link #MODE_STREAM}, you can write data into
     *   this buffer in chunks less than or equal to this size, and it is typical to use
     *   chunks of 1/2 of the total size to permit double-buffering.
     *   If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegates to the session-aware constructor, letting the system allocate a new session.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
    }

    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session
     * with all other media players or audio tracks in the same session, otherwise a new session
     * will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Bridges the legacy stream-type API onto the AudioAttributes/AudioFormat constructor.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
    }

    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *   that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *   configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes");
        }
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // Fall back to the device's primary output rate (or 44100 Hz as a last resort)
        // when the format does not carry an explicit sample rate.
        int rate = 0;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
        {
            rate = format.getSampleRate();
        } else {
            rate = AudioSystem.getPrimaryOutputSamplingRate();
            if (rate <= 0) {
                rate = 44100;
            }
        }
        // Default to stereo when the format does not carry an explicit channel mask.
        int channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0)
        {
            channelMask = format.getChannelMask();
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates and records mSampleRate, mChannels, mAudioFormat and mDataLoadMode;
        // throws IllegalArgumentException on invalid parameters.
        audioParamCheck(rate, channelMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;
        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        mAttributes = (new
AudioAttributes.Builder(attributes).build()); 446 447 if (sessionId < 0) { 448 throw new IllegalArgumentException("Invalid audio session ID: "+sessionId); 449 } 450 451 int[] session = new int[1]; 452 session[0] = sessionId; 453 // native initialization 454 int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes, 455 mSampleRate, mChannels, mAudioFormat, 456 mNativeBufferSizeInBytes, mDataLoadMode, session); 457 if (initResult != SUCCESS) { 458 loge("Error code "+initResult+" when initializing AudioTrack."); 459 return; // with mState == STATE_UNINITIALIZED 460 } 461 462 mSessionId = session[0]; 463 464 if (mDataLoadMode == MODE_STATIC) { 465 mState = STATE_NO_STATIC_DATA; 466 } else { 467 mState = STATE_INITIALIZED; 468 } 469 } 470 471 /** 472 * Builder class for {@link AudioTrack} objects. 473 * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio 474 * attributes and audio format parameters, you indicate which of those vary from the default 475 * behavior on the device. 476 * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat} 477 * parameters, to be used by a new <code>AudioTrack</code> instance: 478 * 479 * <pre class="prettyprint"> 480 * AudioTrack player = new AudioTrack.Builder() 481 * .setAudioAttributes(new AudioAttributes.Builder() 482 * .setUsage(AudioAttributes.USAGE_ALARM) 483 * .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC) 484 * .build()) 485 * .setAudioFormat(new AudioFormat.Builder() 486 * .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 487 * .setSampleRate(44100) 488 * .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 489 * .build()) 490 * .setBufferSizeInBytes(minBuffSize) 491 * .build(); 492 * </pre> 493 * <p> 494 * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)}, 495 * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
     * <br>If the audio format is not specified or is incomplete, its sample rate will be the
     * default output sample rate of the device (see
     * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be
     * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
     * {@link AudioFormat#ENCODING_PCM_16BIT}.
     * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
     * {@link AudioTrack#MODE_STREAM} will be used.
     * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
     * be generated.
     */
    public static class Builder {
        // All fields hold the builder's pending configuration until build() is called.
        private AudioAttributes mAttributes;
        private AudioFormat mFormat;
        private int mBufferSizeInBytes;
        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM;

        /**
         * Constructs a new Builder with the default values as described above.
         */
        public Builder() {
        }

        /**
         * Sets the {@link AudioAttributes}.
         * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
         *     data to be played.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
                throws IllegalArgumentException {
            if (attributes == null) {
                throw new IllegalArgumentException("Illegal null AudioAttributes argument");
            }
            // keep reference, we only copy the data when building
            mAttributes = attributes;
            return this;
        }

        /**
         * Sets the format of the audio data to be played by the {@link AudioTrack}.
         * See {@link AudioFormat.Builder} for configuring the audio format parameters such
         * as encoding, channel mask and sample rate.
         * @param format a non-null {@link AudioFormat} instance.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
                throws IllegalArgumentException {
            if (format == null) {
                throw new IllegalArgumentException("Illegal null AudioFormat argument");
            }
            // keep reference, we only copy the data when building
            mFormat = format;
            return this;
        }

        /**
         * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
         * If using the {@link AudioTrack} in streaming mode
         * (see {@link AudioTrack#MODE_STREAM}), you can write data into this buffer in smaller
         * chunks than this size. See {@link AudioTrack#getMinBufferSize(int, int, int)} to
         * determine the minimum required buffer size for the successful creation of an AudioTrack
         * instance in streaming mode. Using values smaller than <code>getMinBufferSize()</code>
         * will result in an exception when trying to build the <code>AudioTrack</code>.
         * <br>If using the <code>AudioTrack</code> in static mode (see
         * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
         * played by this instance.
         * @param bufferSizeInBytes
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
                throws IllegalArgumentException {
            if (bufferSizeInBytes <= 0) {
                throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
            }
            mBufferSizeInBytes = bufferSizeInBytes;
            return this;
        }

        /**
         * Sets the mode under which buffers of audio data are transferred from the
         * {@link AudioTrack} to the framework.
         * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setTransferMode(@TransferMode int mode)
                throws IllegalArgumentException {
            switch(mode) {
                case MODE_STREAM:
                case MODE_STATIC:
                    mMode = mode;
                    break;
                default:
                    throw new IllegalArgumentException("Invalid transfer mode " + mode);
            }
            return this;
        }

        /**
         * Sets the session ID the {@link AudioTrack} will be attached to.
         * @param sessionId a strictly positive ID number retrieved from another
         *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
         *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
         *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
         * @return the same Builder instance.
         * @throws IllegalArgumentException
         */
        public @NonNull Builder setSessionId(int sessionId)
                throws IllegalArgumentException {
            if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
                throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
            }
            mSessionId = sessionId;
            return this;
        }

        /**
         * Builds an {@link AudioTrack} instance initialized with all the parameters set
         * on this <code>Builder</code>.
         * @return a new {@link AudioTrack} instance.
         * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
         *     were incompatible, or if they are not supported by the device.
         */
        public @NonNull AudioTrack build() throws UnsupportedOperationException {
            // Apply the documented defaults for anything the caller did not set.
            if (mAttributes == null) {
                mAttributes = new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .build();
            }
            if (mFormat == null) {
                mFormat = new AudioFormat.Builder()
                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                        .setSampleRate(AudioSystem.getPrimaryOutputSamplingRate())
                        .setEncoding(AudioFormat.ENCODING_DEFAULT)
                        .build();
            }
            try {
                return new AudioTrack(mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId);
            } catch (IllegalArgumentException e) {
                // Surface constructor argument failures as the documented exception type.
                throw new UnsupportedOperationException(e.getMessage());
            }
        }
    }

    // mask of all the channels supported by this implementation
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;

    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannels is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz,
                                 int channelConfig, int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        if (sampleRateInHz < SAMPLE_RATE_HZ_MIN || sampleRateInHz > SAMPLE_RATE_HZ_MAX) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannels = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            // Anything beyond mono/stereo must pass the multichannel validation below.
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannels = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isValidEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // MODE_STATIC additionally requires a linear PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }

    /**
     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
     * @param channelConfig the mask to validate
     * @return false if the AudioTrack can't be used with such a mask
     */
    private static boolean isMultichannelConfigSupported(int channelConfig) {
        // check for unsupported channels
        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
            loge("Channel configuration features unsupported channels");
            return false;
        }
        final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        if (channelCount > CHANNEL_COUNT_MAX) {
            loge("Channel configuration contains too many channels " +
                    channelCount + ">" + CHANNEL_COUNT_MAX);
            return false;
        }
        // check for unsupported multichannel combinations:
        // - FL/FR must be present
        // - L/R channels must be paired (e.g. no single L channel)
        final int frontPair =
                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        if ((channelConfig & frontPair) != frontPair) {
            loge("Front channels must be present in multichannel configurations");
            return false;
        }
        final int backPair =
                AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
        if ((channelConfig & backPair) != 0) {
            if ((channelConfig & backPair) != backPair) {
                loge("Rear channels can't be used independently");
                return false;
            }
        }
        final int sidePair =
                AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
        if ((channelConfig & sidePair) != 0
                && (channelConfig & sidePair) != sidePair) {
            loge("Side channels can't be used independently");
            return false;
        }
        return true;
    }


    // Convenience method for the constructor's audio buffer size check.
765 // preconditions: 766 // mChannelCount is valid 767 // mAudioFormat is valid 768 // postcondition: 769 // mNativeBufferSizeInBytes is valid (multiple of frame size, positive) 770 private void audioBuffSizeCheck(int audioBufferSize) { 771 // NB: this section is only valid with PCM data. 772 // To update when supporting compressed formats 773 int frameSizeInBytes; 774 if (AudioFormat.isEncodingLinearPcm(mAudioFormat)) { 775 frameSizeInBytes = mChannelCount 776 * (AudioFormat.getBytesPerSample(mAudioFormat)); 777 } else { 778 frameSizeInBytes = 1; 779 } 780 if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) { 781 throw new IllegalArgumentException("Invalid audio buffer size."); 782 } 783 784 mNativeBufferSizeInBytes = audioBufferSize; 785 mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes; 786 } 787 788 789 /** 790 * Releases the native AudioTrack resources. 791 */ 792 public void release() { 793 // even though native_release() stops the native AudioTrack, we need to stop 794 // AudioTrack subclasses too. 795 try { 796 stop(); 797 } catch(IllegalStateException ise) { 798 // don't raise an exception, we're releasing the resources. 799 } 800 native_release(); 801 mState = STATE_UNINITIALIZED; 802 } 803 804 @Override 805 protected void finalize() { 806 native_finalize(); 807 } 808 809 //-------------------------------------------------------------------------- 810 // Getters 811 //-------------------- 812 /** 813 * Returns the minimum gain value, which is the constant 0.0. 814 * Gain values less than 0.0 will be clamped to 0.0. 815 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 816 * @return the minimum value, which is the constant 0.0. 817 */ 818 static public float getMinVolume() { 819 return GAIN_MIN; 820 } 821 822 /** 823 * Returns the maximum gain value, which is greater than or equal to 1.0. 824 * Gain values greater than the maximum will be clamped to the maximum. 
     * <p>The word "volume" in the API name is historical; this is actually a gain,
     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
     * corresponds to a gain of 0 dB (sample values left unmodified).
     * @return the maximum value, which is greater than or equal to 1.0.
     */
    static public float getMaxVolume() {
        return GAIN_MAX;
    }

    /**
     * Returns the configured audio data sample rate in Hz
     */
    public int getSampleRate() {
        return mSampleRate;
    }

    /**
     * Returns the current playback rate in Hz.
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }

    /**
     * Returns the configured audio data format. See {@link AudioFormat#ENCODING_PCM_16BIT}
     * and {@link AudioFormat#ENCODING_PCM_8BIT}.
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }

    /**
     * Returns the type of audio stream this AudioTrack is configured for.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
     */
    public int getStreamType() {
        return mStreamType;
    }

    /**
     * Returns the configured channel configuration.
     * See {@link AudioFormat#CHANNEL_OUT_MONO}
     * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }

    /**
     * Returns the configured number of channels.
     */
    public int getChannelCount() {
        return mChannelCount;
    }

    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     * @see #STATE_UNINITIALIZED
     */
    public int getState() {
        return mState;
    }

    /**
     * Returns the playback state of the AudioTrack instance.
     * @see #PLAYSTATE_STOPPED
     * @see #PLAYSTATE_PAUSED
     * @see #PLAYSTATE_PLAYING
     */
    public int getPlayState() {
        // mPlayState is written under this same lock by the transport methods
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }

    /**
     * Returns the "native frame count", derived from the bufferSizeInBytes specified at
     * creation time and converted to frame units.
     * If track's creation mode is {@link #MODE_STATIC},
     * it is equal to the specified bufferSizeInBytes converted to frame units.
     * If track's creation mode is {@link #MODE_STREAM},
     * it is typically greater than or equal to the specified bufferSizeInBytes converted to frame
     * units; it may be rounded up to a larger value if needed by the target device implementation.
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     * See {@link AudioManager#getProperty(String)} for key
     * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     */
    @Deprecated
    protected int getNativeFrameCount() {
        return native_get_native_frame_count();
    }

    /**
     * Returns marker position expressed in frames.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }

    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }

    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits. That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter. It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
     * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
     * the total number of frames played since reset,
     * <i>not</i> the current offset within the buffer.
     */
    public int getPlaybackHeadPosition() {
        return native_get_position();
    }

    /**
     * Returns this track's estimated latency in milliseconds. This includes the latency due
     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
     *
     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
     * a better solution.
     * @hide
     */
    public int getLatency() {
        return native_get_latency();
    }

    /**
     * Returns the output sample rate in Hz for the specified stream type.
     */
    static public int getNativeOutputSampleRate(int streamType) {
        return native_get_output_sample_rate(streamType);
    }

    /**
     * Returns the minimum buffer size required for the successful creation of an AudioTrack
     * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
     * guarantee a smooth playback under load, and higher values should be chosen according to
     * the expected frequency at which the buffer will be refilled with additional data to play.
     * For example, if you intend to dynamically set the source sample rate of an AudioTrack
     * to a higher value than the initial source sample rate, be sure to configure the buffer size
     * based on the highest planned sample rate.
983 * @param sampleRateInHz the source sample rate expressed in Hz. 984 * @param channelConfig describes the configuration of the audio channels. 985 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 986 * {@link AudioFormat#CHANNEL_OUT_STEREO} 987 * @param audioFormat the format in which the audio data is represented. 988 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 989 * {@link AudioFormat#ENCODING_PCM_8BIT}, 990 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 991 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed, 992 * or {@link #ERROR} if unable to query for output properties, 993 * or the minimum buffer size expressed in bytes. 994 */ 995 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) { 996 int channelCount = 0; 997 switch(channelConfig) { 998 case AudioFormat.CHANNEL_OUT_MONO: 999 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1000 channelCount = 1; 1001 break; 1002 case AudioFormat.CHANNEL_OUT_STEREO: 1003 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1004 channelCount = 2; 1005 break; 1006 default: 1007 if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) { 1008 // input channel configuration features unsupported channels 1009 loge("getMinBufferSize(): Invalid channel configuration."); 1010 return ERROR_BAD_VALUE; 1011 } else { 1012 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1013 } 1014 } 1015 1016 if (!AudioFormat.isValidEncoding(audioFormat)) { 1017 loge("getMinBufferSize(): Invalid audio format."); 1018 return ERROR_BAD_VALUE; 1019 } 1020 1021 // sample rate, note these values are subject to change 1022 if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) { 1023 loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate."); 1024 return ERROR_BAD_VALUE; 1025 } 1026 1027 int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat); 1028 if (size <= 0) { 1029 
loge("getMinBufferSize(): error querying hardware"); 1030 return ERROR; 1031 } 1032 else { 1033 return size; 1034 } 1035 } 1036 1037 /** 1038 * Returns the audio session ID. 1039 * 1040 * @return the ID of the audio session this AudioTrack belongs to. 1041 */ 1042 public int getAudioSessionId() { 1043 return mSessionId; 1044 } 1045 1046 /** 1047 * Poll for a timestamp on demand. 1048 * <p> 1049 * If you need to track timestamps during initial warmup or after a routing or mode change, 1050 * you should request a new timestamp once per second until the reported timestamps 1051 * show that the audio clock is stable. 1052 * Thereafter, query for a new timestamp approximately once every 10 seconds to once per minute. 1053 * Calling this method more often is inefficient. 1054 * It is also counter-productive to call this method more often than recommended, 1055 * because the short-term differences between successive timestamp reports are not meaningful. 1056 * If you need a high-resolution mapping between frame position and presentation time, 1057 * consider implementing that at application level, based on low-resolution timestamps. 1058 * <p> 1059 * The audio data at the returned position may either already have been 1060 * presented, or may have not yet been presented but is committed to be presented. 1061 * It is not possible to request the time corresponding to a particular position, 1062 * or to request the (fractional) position corresponding to a particular time. 1063 * If you need such features, consider implementing them at application level. 1064 * 1065 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1066 * and owned by caller. 1067 * @return true if a timestamp is available, or false if no timestamp is available. 
     * If a timestamp is available,
     * the AudioTimestamp instance is filled in with a position in frame units, together
     * with the estimated time when that frame was presented or is committed to
     * be presented.
     * In the case that no timestamp is available, any supplied instance is left unaltered.
     * A timestamp may be temporarily unavailable while the audio clock is stabilizing,
     * or during and immediately after a route change.
     */
    // Add this text when the "on new timestamp" API is added:
    //   Use if you need to get the most recent timestamp outside of the event callback handler.
    public boolean getTimestamp(AudioTimestamp timestamp)
    {
        if (timestamp == null) {
            throw new IllegalArgumentException();
        }
        // It's unfortunate, but we have to either create garbage every time or use synchronized
        long[] longArray = new long[2];
        int ret = native_get_timestamp(longArray);
        if (ret != SUCCESS) {
            // no timestamp available: leave the caller's instance untouched, per the contract
            return false;
        }
        timestamp.framePosition = longArray[0];
        timestamp.nanoTime = longArray[1];
        return true;
    }


    //--------------------------------------------------------------------------
    // Initialization / configuration
    //--------------------
    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
     * Notifications will be received in the same thread as the one in which the AudioTrack
     * instance was created.
     * @param listener
     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // delegate to the two-argument overload; null handler means "use the creating thread"
        setPlaybackPositionUpdateListener(listener, null);
    }

    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
1112 * Use this method to receive AudioTrack events in the Handler associated with another 1113 * thread than the one in which you created the AudioTrack instance. 1114 * @param listener 1115 * @param handler the Handler that will receive the event notification messages. 1116 */ 1117 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, 1118 Handler handler) { 1119 if (listener != null) { 1120 mEventHandlerDelegate = new NativeEventHandlerDelegate(this, listener, handler); 1121 } else { 1122 mEventHandlerDelegate = null; 1123 } 1124 } 1125 1126 1127 private static float clampGainOrLevel(float gainOrLevel) { 1128 if (Float.isNaN(gainOrLevel)) { 1129 throw new IllegalArgumentException(); 1130 } 1131 if (gainOrLevel < GAIN_MIN) { 1132 gainOrLevel = GAIN_MIN; 1133 } else if (gainOrLevel > GAIN_MAX) { 1134 gainOrLevel = GAIN_MAX; 1135 } 1136 return gainOrLevel; 1137 } 1138 1139 1140 /** 1141 * Sets the specified left and right output gain values on the AudioTrack. 1142 * <p>Gain values are clamped to the closed interval [0.0, max] where 1143 * max is the value of {@link #getMaxVolume}. 1144 * A value of 0.0 results in zero gain (silence), and 1145 * a value of 1.0 means unity gain (signal unchanged). 1146 * The default value is 1.0 meaning unity gain. 1147 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1148 * @param leftGain output gain for the left channel. 1149 * @param rightGain output gain for the right channel 1150 * @return error code or success, see {@link #SUCCESS}, 1151 * {@link #ERROR_INVALID_OPERATION} 1152 * @deprecated Applications should use {@link #setVolume} instead, as it 1153 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 
     */
    public int setStereoVolume(float leftGain, float rightGain) {
        if (isRestricted()) {
            // the play-audio app op is denied: silently ignore the request, report success
            return SUCCESS;
        }
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }

        // out-of-range gains are clamped rather than rejected, per the javadoc
        leftGain = clampGainOrLevel(leftGain);
        rightGain = clampGainOrLevel(rightGain);

        native_setVolume(leftGain, rightGain);

        return SUCCESS;
    }


    /**
     * Sets the specified output gain value on all channels of this track.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>This API is preferred over {@link #setStereoVolume}, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param gain output gain for all channels.
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setVolume(float gain) {
        return setStereoVolume(gain, gain);
    }


    /**
     * Sets the playback sample rate for this track. This sets the sampling rate at which
     * the audio data will be consumed and played back
     * (as set by the sampleRateInHz parameter in the
     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
     * not the original sampling rate of the
     * content. For example, setting it to half the sample rate of the content will cause the
     * playback to last twice as long, but will also result in a pitch shift down by one octave.
     * The valid sample rate range is from 1 Hz to twice the value returned by
     * {@link #getNativeOutputSampleRate(int)}.
     * @param sampleRateInHz the sample rate expressed in Hz
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setPlaybackRate(int sampleRateInHz) {
        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        if (sampleRateInHz <= 0) {
            return ERROR_BAD_VALUE;
        }
        // NOTE(review): only the lower bound is validated here; presumably the upper bound
        // (2x the native output rate, per the javadoc) is enforced by the native layer — confirm.
        return native_set_playback_rate(sampleRateInHz);
    }


    /**
     * Sets the position of the notification marker. At most one marker can be active.
     * @param markerInFrames marker position in wrapping frame units similar to
     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
     * To set a marker at a position which would appear as zero due to wraparound,
     * a workaround is to use a non-zero position near zero, such as -1 or 1.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setNotificationMarkerPosition(int markerInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_set_marker_pos(markerInFrames);
    }


    /**
     * Sets the period for the periodic notification event.
     * @param periodInFrames update period expressed in frames.
     * Zero period means no position updates. A negative period is not allowed.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
     */
    public int setPositionNotificationPeriod(int periodInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_set_pos_update_period(periodInFrames);
    }


    /**
     * Sets the playback head position within the static buffer.
     * The track must be stopped or paused for the position to be changed,
     * and must use the {@link #MODE_STATIC} mode.
     * @param positionInFrames playback head position within buffer, expressed in frames.
     * Zero corresponds to start of buffer.
     * The position must not be greater than the buffer size in frames, or negative.
     * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
     * the position values have different meanings.
     * <br>
     * If looping is currently enabled and the new position is greater than or equal to the
     * loop end marker, the behavior varies by API level: for API level 22 and above,
     * the looping is first disabled and then the position is set.
     * For earlier API levels, the behavior is unspecified.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setPlaybackHeadPosition(int positionInFrames) {
        // only valid on an initialized MODE_STATIC track that is not currently playing
        if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
                getPlayState() == PLAYSTATE_PLAYING) {
            return ERROR_INVALID_OPERATION;
        }
        // the full-buffer position (== mNativeBufferSizeInFrames) is deliberately allowed
        if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
            return ERROR_BAD_VALUE;
        }
        return native_set_position(positionInFrames);
    }

    /**
     * Sets the loop points and the loop count. The loop can be infinite.
     * Similarly to setPlaybackHeadPosition,
     * the track must be stopped or paused for the loop points to be changed,
     * and must use the {@link #MODE_STATIC} mode.
     * @param startInFrames loop start marker expressed in frames.
     * Zero corresponds to start of buffer.
     * The start marker must not be greater than or equal to the buffer size in frames, or negative.
     * @param endInFrames loop end marker expressed in frames.
     * The total buffer size in frames corresponds to end of buffer.
     * The end marker must not be greater than the buffer size in frames.
     * For looping, the end marker must not be less than or equal to the start marker,
     * but to disable looping
     * it is permitted for start marker, end marker, and loop count to all be 0.
     * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
     * If the loop period (endInFrames - startInFrames) is too small for the implementation to
     * support,
     * {@link #ERROR_BAD_VALUE} is returned.
     * The loop range is the interval [startInFrames, endInFrames).
     * <br>
     * For API level 22 and above, the position is left unchanged,
     * unless it is greater than or equal to the loop end marker, in which case
     * it is forced to the loop start marker.
     * For earlier API levels, the effect on position is unspecified.
     * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
     *    A value of -1 means infinite looping, and 0 disables looping.
     *    A value of positive N means to "loop" (go back) N times. For example,
     *    a value of one means to play the region two times in total.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
        // only valid on an initialized MODE_STATIC track that is not currently playing
        if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
                getPlayState() == PLAYSTATE_PLAYING) {
            return ERROR_INVALID_OPERATION;
        }
        if (loopCount == 0) {
            ;   // explicitly allowed as an exception to the loop region range check
        } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
                startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
            return ERROR_BAD_VALUE;
        }
        return native_set_loop(startInFrames, endInFrames, loopCount);
    }

    /**
     * Sets the initialization state of the instance.
This method was originally intended to be used
     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
     * @param state the state of the AudioTrack instance
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     */
    @Deprecated
    protected void setState(int state) {
        mState = state;
    }


    //---------------------------------------------------------
    // Transport control methods
    //--------------------
    /**
     * Starts playing an AudioTrack.
     * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
     * the {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
     * or {@link #write(float[], int, int, int)} methods.
     * If the mode is {@link #MODE_STREAM}, you can optionally prime the
     * output buffer by writing up to bufferSizeInBytes (from constructor) before starting.
     * This priming will avoid an immediate underrun, but is not required.
     *
     * @throws IllegalStateException
     */
    public void play()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
        }
        if (isRestricted()) {
            // the play-audio app op forbids audible playback: start muted rather than fail
            setVolume(0);
        }
        synchronized(mPlayStateLock) {
            native_start();
            mPlayState = PLAYSTATE_PLAYING;
        }
    }

    /**
     * Checks whether the play-audio app op denies audible playback for this track.
     * @return true if playback should be muted for this uid/package
     */
    private boolean isRestricted() {
        // tracks allowed to bypass interruption policy (e.g. system sounds) are never restricted
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_BYPASS_INTERRUPTION_POLICY) != 0) {
            return false;
        }
        try {
            final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType);
            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage,
                    Process.myUid(), ActivityThread.currentPackageName());
            return mode != AppOpsManager.MODE_ALLOWED;
        } catch (RemoteException e) {
            // can't reach the AppOps service: err on the side of allowing playback
            return false;
        }
    }

    /**
     * Stops playing the audio data.
     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
     * after the last buffer that was written has been played. For an immediate stop, use
     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
     * back yet.
     * @throws IllegalStateException
     */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            native_stop();
            mPlayState = PLAYSTATE_STOPPED;
        }
    }

    /**
     * Pauses the playback of the audio data. Data that has not been played
     * back will not be discarded. Subsequent calls to {@link #play} will play
     * this data back. See {@link #flush()} to discard this data.
     *
     * @throws IllegalStateException
     */
    public void pause()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
        }
        //logd("pause()");

        // pause playback
        synchronized(mPlayStateLock) {
            native_pause();
            mPlayState = PLAYSTATE_PAUSED;
        }
    }


    //---------------------------------------------------------
    // Audio data supply
    //--------------------

    /**
     * Flushes the audio data currently queued for playback. Any data that has
     * been written but not yet presented will be discarded. No-op if not stopped or paused,
     * or if the track's creation mode is not {@link #MODE_STREAM}.
     * <BR> Note that although data written but not yet presented is discarded, there is no
     * guarantee that all of the buffer space formerly used by that data
     * is available for a subsequent write.
     * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
     * less than or equal to the total buffer size
     * may return a short actual transfer count.
     */
    public void flush() {
        if (mState == STATE_INITIALIZED) {
            // flush the data in native layer
            // NOTE(review): the play-state and mode preconditions in the javadoc are
            // presumably enforced natively; only the init state is checked here — confirm.
            native_flush();
        }
    }

    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * In streaming mode, will block until all data has been written to the audio sink.
     * In static buffer mode, copies the data to the buffer starting at offset 0.
     * Note that the actual playback of this data might occur after this function
     * returns. This function is thread safe with respect to {@link #stop} calls,
     * in which case all of the specified data might not be written to the audio sink.
     *
     * @param audioData the array that holds the data to play.
     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
     *    starts.
     * @param sizeInBytes the number of bytes to read in audioData after the offset.
     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
     *    the parameters don't resolve to valid data and indexes, or
     *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated.
     */

    public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {

        // float PCM data must go through the float[] overload
        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
            return ERROR_INVALID_OPERATION;
        }

        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
                || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
                || (offsetInBytes + sizeInBytes > audioData.length)) {
            return ERROR_BAD_VALUE;
        }

        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
                true /*isBlocking*/);

        // the first successful write to a static track supplies its data and
        // completes initialization
        if ((mDataLoadMode == MODE_STATIC)
                && (mState == STATE_NO_STATIC_DATA)
                && (ret > 0)) {
            // benign race with respect to other APIs that read mState
            mState = STATE_INITIALIZED;
        }

        return ret;
    }


    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * In streaming mode, will block until all data has been written to the audio sink.
     * In static buffer mode, copies the data to the buffer starting at offset 0.
     * Note that the actual playback of this data might occur after this function
     * returns. This function is thread safe with respect to {@link #stop} calls,
     * in which case all of the specified data might not be written to the audio sink.
     *
     * @param audioData the array that holds the data to play.
     * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
     *     starts.
     * @param sizeInShorts the number of shorts to read in audioData after the offset.
     * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
     *     if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
     *     the parameters don't resolve to valid data and indexes.
     */

    public int write(short[] audioData, int offsetInShorts, int sizeInShorts) {

        // float PCM data must go through the float[] overload
        if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
            return ERROR_INVALID_OPERATION;
        }

        if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
                || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
                || (offsetInShorts + sizeInShorts > audioData.length)) {
            return ERROR_BAD_VALUE;
        }

        int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat);

        // the first successful write to a static track supplies its data and
        // completes initialization
        if ((mDataLoadMode == MODE_STATIC)
                && (mState == STATE_NO_STATIC_DATA)
                && (ret > 0)) {
            // benign race with respect to other APIs that read mState
            mState = STATE_INITIALIZED;
        }

        return ret;
    }


    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * In static buffer mode, copies the data to the buffer starting at offset 0,
     * and the write mode is ignored.
     * In streaming mode, the blocking behavior will depend on the write mode.
     * <p>
     * Note that the actual playback of this data might occur after this function
     * returns. This function is thread safe with respect to {@link #stop} calls,
     * in which case all of the specified data might not be written to the audio sink.
     * <p>
     * @param audioData the array that holds the data to play.
     *     The implementation does not clip for sample values within the nominal range
     *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
     *     less than or equal to unity (1.0f), and in the absence of post-processing effects
     *     that could add energy, such as reverb. For the convenience of applications
     *     that compute samples using filters with non-unity gain,
     *     sample values +3 dB beyond the nominal range are permitted.
     *     However such values may eventually be limited or clipped, depending on various gains
     *     and later processing in the audio path. Therefore applications are encouraged
     *     to provide sample values within the nominal range.
     * @param offsetInFloats the offset, expressed as a number of floats,
     *     in audioData where the data to play starts.
     * @param sizeInFloats the number of floats to read in audioData after the offset.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
     *     effect in static mode.
     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *     to the audio sink.
     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION}
     *     if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
     *     the parameters don't resolve to valid data and indexes.
     */
    public int write(float[] audioData, int offsetInFloats, int sizeInFloats,
            @WriteMode int writeMode) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        // this overload is exclusively for float PCM tracks
        if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
            Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
                || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
                || (offsetInFloats + sizeInFloats > audioData.length)) {
            Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
            return ERROR_BAD_VALUE;
        }

        int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
                writeMode == WRITE_BLOCKING);

        // the first successful write to a static track supplies its data and
        // completes initialization
        if ((mDataLoadMode == MODE_STATIC)
                && (mState == STATE_NO_STATIC_DATA)
                && (ret > 0)) {
            // benign race with respect to other APIs that read mState
            mState = STATE_INITIALIZED;
        }

        return ret;
    }


    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write
     * mode is ignored.
     * In streaming mode, the blocking behavior will depend on the write mode.
     * @param audioData the buffer that holds the data to play, starting at the position reported
     *     by <code>audioData.position()</code>.
1611 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 1612 * have been advanced to reflect the amount of data that was successfully written to 1613 * the AudioTrack. 1614 * @param sizeInBytes number of bytes to write. 1615 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 1616 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1617 * effect in static mode. 1618 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1619 * to the audio sink. 1620 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1621 * queuing as much audio data for playback as possible without blocking. 1622 * @return 0 or a positive number of bytes that were written, or 1623 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION} 1624 */ 1625 public int write(ByteBuffer audioData, int sizeInBytes, 1626 @WriteMode int writeMode) { 1627 1628 if (mState == STATE_UNINITIALIZED) { 1629 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 1630 return ERROR_INVALID_OPERATION; 1631 } 1632 1633 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1634 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1635 return ERROR_BAD_VALUE; 1636 } 1637 1638 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 1639 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 1640 return ERROR_BAD_VALUE; 1641 } 1642 1643 int ret = 0; 1644 if (audioData.isDirect()) { 1645 ret = native_write_native_bytes(audioData, 1646 audioData.position(), sizeInBytes, mAudioFormat, 1647 writeMode == WRITE_BLOCKING); 1648 } else { 1649 ret = native_write_byte(NioUtils.unsafeArray(audioData), 1650 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 1651 sizeInBytes, mAudioFormat, 1652 writeMode == WRITE_BLOCKING); 
1653 } 1654 1655 if ((mDataLoadMode == MODE_STATIC) 1656 && (mState == STATE_NO_STATIC_DATA) 1657 && (ret > 0)) { 1658 // benign race with respect to other APIs that read mState 1659 mState = STATE_INITIALIZED; 1660 } 1661 1662 if (ret > 0) { 1663 audioData.position(audioData.position() + ret); 1664 } 1665 1666 return ret; 1667 } 1668 1669 /** 1670 * Sets the playback head position within the static buffer to zero, 1671 * that is it rewinds to start of static buffer. 1672 * The track must be stopped or paused, and 1673 * the track's creation mode must be {@link #MODE_STATIC}. 1674 * <p> 1675 * For API level 22 and above, also resets the value returned by 1676 * {@link #getPlaybackHeadPosition()} to zero. 1677 * For earlier API levels, the reset behavior is unspecified. 1678 * <p> 1679 * {@link #setPlaybackHeadPosition(int)} to zero 1680 * is recommended instead when the reset of {@link #getPlaybackHeadPosition} is not needed. 1681 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1682 * {@link #ERROR_INVALID_OPERATION} 1683 */ 1684 public int reloadStaticData() { 1685 if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) { 1686 return ERROR_INVALID_OPERATION; 1687 } 1688 return native_reload_static(); 1689 } 1690 1691 //-------------------------------------------------------------------------- 1692 // Audio effects management 1693 //-------------------- 1694 1695 /** 1696 * Attaches an auxiliary effect to the audio track. A typical auxiliary 1697 * effect is a reverberation effect which can be applied on any sound source 1698 * that directs a certain amount of its energy to this effect. This amount 1699 * is defined by setAuxEffectSendLevel(). 1700 * {@see #setAuxEffectSendLevel(float)}. 1701 * <p>After creating an auxiliary effect (e.g. 
1702 * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with 1703 * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling 1704 * this method to attach the audio track to the effect. 1705 * <p>To detach the effect from the audio track, call this method with a 1706 * null effect id. 1707 * 1708 * @param effectId system wide unique id of the effect to attach 1709 * @return error code or success, see {@link #SUCCESS}, 1710 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE} 1711 */ 1712 public int attachAuxEffect(int effectId) { 1713 if (mState == STATE_UNINITIALIZED) { 1714 return ERROR_INVALID_OPERATION; 1715 } 1716 return native_attachAuxEffect(effectId); 1717 } 1718 1719 /** 1720 * Sets the send level of the audio track to the attached auxiliary effect 1721 * {@link #attachAuxEffect(int)}. Effect levels 1722 * are clamped to the closed interval [0.0, max] where 1723 * max is the value of {@link #getMaxVolume}. 1724 * A value of 0.0 results in no effect, and a value of 1.0 is full send. 1725 * <p>By default the send level is 0.0f, so even if an effect is attached to the player 1726 * this method must be called for the effect to be applied. 1727 * <p>Note that the passed level value is a linear scalar. 
UI controls should be scaled 1728 * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB, 1729 * so an appropriate conversion from linear UI input x to level is: 1730 * x == 0 -> level = 0 1731 * 0 < x <= R -> level = 10^(72*(x-R)/20/R) 1732 * 1733 * @param level linear send level 1734 * @return error code or success, see {@link #SUCCESS}, 1735 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR} 1736 */ 1737 public int setAuxEffectSendLevel(float level) { 1738 if (isRestricted()) { 1739 return SUCCESS; 1740 } 1741 if (mState == STATE_UNINITIALIZED) { 1742 return ERROR_INVALID_OPERATION; 1743 } 1744 level = clampGainOrLevel(level); 1745 int err = native_setAuxEffectSendLevel(level); 1746 return err == 0 ? SUCCESS : ERROR; 1747 } 1748 1749 //--------------------------------------------------------- 1750 // Interface definitions 1751 //-------------------- 1752 /** 1753 * Interface definition for a callback to be invoked when the playback head position of 1754 * an AudioTrack has reached a notification marker or has increased by a certain period. 1755 */ 1756 public interface OnPlaybackPositionUpdateListener { 1757 /** 1758 * Called on the listener to notify it that the previously set marker has been reached 1759 * by the playback head. 1760 */ 1761 void onMarkerReached(AudioTrack track); 1762 1763 /** 1764 * Called on the listener to periodically notify it that the playback head has reached 1765 * a multiple of the notification period. 
1766 */ 1767 void onPeriodicNotification(AudioTrack track); 1768 } 1769 1770 //--------------------------------------------------------- 1771 // Inner classes 1772 //-------------------- 1773 /** 1774 * Helper class to handle the forwarding of native events to the appropriate listener 1775 * (potentially) handled in a different thread 1776 */ 1777 private class NativeEventHandlerDelegate { 1778 private final Handler mHandler; 1779 1780 NativeEventHandlerDelegate(final AudioTrack track, 1781 final OnPlaybackPositionUpdateListener listener, 1782 Handler handler) { 1783 // find the looper for our new event handler 1784 Looper looper; 1785 if (handler != null) { 1786 looper = handler.getLooper(); 1787 } else { 1788 // no given handler, use the looper the AudioTrack was created in 1789 looper = mInitializationLooper; 1790 } 1791 1792 // construct the event handler with this looper 1793 if (looper != null) { 1794 // implement the event handler delegate 1795 mHandler = new Handler(looper) { 1796 @Override 1797 public void handleMessage(Message msg) { 1798 if (track == null) { 1799 return; 1800 } 1801 switch(msg.what) { 1802 case NATIVE_EVENT_MARKER: 1803 if (listener != null) { 1804 listener.onMarkerReached(track); 1805 } 1806 break; 1807 case NATIVE_EVENT_NEW_POS: 1808 if (listener != null) { 1809 listener.onPeriodicNotification(track); 1810 } 1811 break; 1812 default: 1813 loge("Unknown native event type: " + msg.what); 1814 break; 1815 } 1816 } 1817 }; 1818 } else { 1819 mHandler = null; 1820 } 1821 } 1822 1823 Handler getHandler() { 1824 return mHandler; 1825 } 1826 } 1827 1828 1829 //--------------------------------------------------------- 1830 // Java methods called from the native side 1831 //-------------------- 1832 @SuppressWarnings("unused") 1833 private static void postEventFromNative(Object audiotrack_ref, 1834 int what, int arg1, int arg2, Object obj) { 1835 //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2); 1836 AudioTrack 
track = (AudioTrack)((WeakReference)audiotrack_ref).get(); 1837 if (track == null) { 1838 return; 1839 } 1840 1841 NativeEventHandlerDelegate delegate = track.mEventHandlerDelegate; 1842 if (delegate != null) { 1843 Handler handler = delegate.getHandler(); 1844 if (handler != null) { 1845 Message m = handler.obtainMessage(what, arg1, arg2, obj); 1846 handler.sendMessage(m); 1847 } 1848 } 1849 1850 } 1851 1852 1853 //--------------------------------------------------------- 1854 // Native methods called from the Java side 1855 //-------------------- 1856 1857 // post-condition: mStreamType is overwritten with a value 1858 // that reflects the audio attributes (e.g. an AudioAttributes object with a usage of 1859 // AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC 1860 private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this, 1861 Object /*AudioAttributes*/ attributes, 1862 int sampleRate, int channelMask, int audioFormat, 1863 int buffSizeInBytes, int mode, int[] sessionId); 1864 1865 private native final void native_finalize(); 1866 1867 private native final void native_release(); 1868 1869 private native final void native_start(); 1870 1871 private native final void native_stop(); 1872 1873 private native final void native_pause(); 1874 1875 private native final void native_flush(); 1876 1877 private native final int native_write_byte(byte[] audioData, 1878 int offsetInBytes, int sizeInBytes, int format, 1879 boolean isBlocking); 1880 1881 private native final int native_write_short(short[] audioData, 1882 int offsetInShorts, int sizeInShorts, int format); 1883 1884 private native final int native_write_float(float[] audioData, 1885 int offsetInFloats, int sizeInFloats, int format, 1886 boolean isBlocking); 1887 1888 private native final int native_write_native_bytes(Object audioData, 1889 int positionInBytes, int sizeInBytes, int format, boolean blocking); 1890 1891 private native final int native_reload_static(); 

    private native final int native_get_native_frame_count();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    // Playback-position marker and periodic-notification configuration.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    // Playback head position, expressed in frames.
    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not require a constructed track instance.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    //---------------------------------------------------------
    // Utility methods
    //------------------

    // Debug-level logging helper, tagged with the class TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Error-level logging helper, tagged with the class TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
}