AudioTrack.java revision 2d6de4c38c899707e0596b7fa4dad9bbb3eb6b60
1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17package android.media; 18 19import java.lang.annotation.Retention; 20import java.lang.annotation.RetentionPolicy; 21import java.lang.ref.WeakReference; 22import java.lang.Math; 23import java.nio.ByteBuffer; 24import java.nio.ByteOrder; 25import java.nio.NioUtils; 26 27import android.annotation.IntDef; 28import android.annotation.NonNull; 29import android.annotation.SystemApi; 30import android.app.ActivityThread; 31import android.app.AppOpsManager; 32import android.content.Context; 33import android.os.Handler; 34import android.os.IBinder; 35import android.os.Looper; 36import android.os.Message; 37import android.os.Process; 38import android.os.RemoteException; 39import android.os.ServiceManager; 40import android.util.Log; 41 42import com.android.internal.app.IAppOpsService; 43 44 45/** 46 * The AudioTrack class manages and plays a single audio resource for Java applications. 47 * It allows streaming of PCM audio buffers to the audio sink for playback. This is 48 * achieved by "pushing" the data to the AudioTrack object using one of the 49 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)}, 50 * and {@link #write(float[], int, int, int)} methods. 
51 * 52 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br> 53 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using 54 * one of the {@code write()} methods. These are blocking and return when the data has been 55 * transferred from the Java layer to the native layer and queued for playback. The streaming 56 * mode is most useful when playing blocks of audio data that for instance are: 57 * 58 * <ul> 59 * <li>too big to fit in memory because of the duration of the sound to play,</li> 60 * <li>too big to fit in memory because of the characteristics of the audio data 61 * (high sampling rate, bits per sample ...)</li> 62 * <li>received or generated while previously queued audio is playing.</li> 63 * </ul> 64 * 65 * The static mode should be chosen when dealing with short sounds that fit in memory and 66 * that need to be played with the smallest latency possible. The static mode will 67 * therefore be preferred for UI and game sounds that are played often, and with the 68 * smallest overhead possible. 69 * 70 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer. 71 * The size of this buffer, specified during the construction, determines how long an AudioTrack 72 * can play before running out of data.<br> 73 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can 74 * be played from it.<br> 75 * For the streaming mode, data will be written to the audio sink in chunks of 76 * sizes less than or equal to the total buffer size. 77 * 78 * AudioTrack is not final and thus permits subclasses, but such use is not recommended. 79 */ 80public class AudioTrack 81{ 82 //--------------------------------------------------------- 83 // Constants 84 //-------------------- 85 /** Minimum value for a linear gain or auxiliary effect level. 86 * This value must be exactly equal to 0.0f; do not change it. 
87 */ 88 private static final float GAIN_MIN = 0.0f; 89 /** Maximum value for a linear gain or auxiliary effect level. 90 * This value must be greater than or equal to 1.0f. 91 */ 92 private static final float GAIN_MAX = 1.0f; 93 94 /** Minimum value for sample rate */ 95 private static final int SAMPLE_RATE_HZ_MIN = 4000; 96 /** Maximum value for sample rate */ 97 private static final int SAMPLE_RATE_HZ_MAX = 96000; 98 99 /** Maximum value for AudioTrack channel count */ 100 private static final int CHANNEL_COUNT_MAX = 8; 101 102 /** indicates AudioTrack state is stopped */ 103 public static final int PLAYSTATE_STOPPED = 1; // matches SL_PLAYSTATE_STOPPED 104 /** indicates AudioTrack state is paused */ 105 public static final int PLAYSTATE_PAUSED = 2; // matches SL_PLAYSTATE_PAUSED 106 /** indicates AudioTrack state is playing */ 107 public static final int PLAYSTATE_PLAYING = 3; // matches SL_PLAYSTATE_PLAYING 108 109 // keep these values in sync with android_media_AudioTrack.cpp 110 /** 111 * Creation mode where audio data is transferred from Java to the native layer 112 * only once before the audio starts playing. 113 */ 114 public static final int MODE_STATIC = 0; 115 /** 116 * Creation mode where audio data is streamed from Java to the native layer 117 * as the audio is playing. 118 */ 119 public static final int MODE_STREAM = 1; 120 121 /** @hide */ 122 @IntDef({ 123 MODE_STATIC, 124 MODE_STREAM 125 }) 126 @Retention(RetentionPolicy.SOURCE) 127 public @interface TransferMode {} 128 129 /** 130 * State of an AudioTrack that was not successfully initialized upon creation. 131 */ 132 public static final int STATE_UNINITIALIZED = 0; 133 /** 134 * State of an AudioTrack that is ready to be used. 135 */ 136 public static final int STATE_INITIALIZED = 1; 137 /** 138 * State of a successfully initialized AudioTrack that uses static data, 139 * but that hasn't received that data yet. 
140 */ 141 public static final int STATE_NO_STATIC_DATA = 2; 142 143 /** 144 * Denotes a successful operation. 145 */ 146 public static final int SUCCESS = AudioSystem.SUCCESS; 147 /** 148 * Denotes a generic operation failure. 149 */ 150 public static final int ERROR = AudioSystem.ERROR; 151 /** 152 * Denotes a failure due to the use of an invalid value. 153 */ 154 public static final int ERROR_BAD_VALUE = AudioSystem.BAD_VALUE; 155 /** 156 * Denotes a failure due to the improper use of a method. 157 */ 158 public static final int ERROR_INVALID_OPERATION = AudioSystem.INVALID_OPERATION; 159 160 // Error codes: 161 // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp 162 private static final int ERROR_NATIVESETUP_AUDIOSYSTEM = -16; 163 private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK = -17; 164 private static final int ERROR_NATIVESETUP_INVALIDFORMAT = -18; 165 private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE = -19; 166 private static final int ERROR_NATIVESETUP_NATIVEINITFAILED = -20; 167 168 // Events: 169 // to keep in sync with frameworks/av/include/media/AudioTrack.h 170 /** 171 * Event id denotes when playback head has reached a previously set marker. 172 */ 173 private static final int NATIVE_EVENT_MARKER = 3; 174 /** 175 * Event id denotes when previously set update period has elapsed during playback. 
176 */ 177 private static final int NATIVE_EVENT_NEW_POS = 4; 178 179 private final static String TAG = "android.media.AudioTrack"; 180 181 182 /** @hide */ 183 @IntDef({ 184 WRITE_BLOCKING, 185 WRITE_NON_BLOCKING 186 }) 187 @Retention(RetentionPolicy.SOURCE) 188 public @interface WriteMode {} 189 190 /** 191 * The write mode indicating the write operation will block until all data has been written, 192 * to be used in {@link #write(ByteBuffer, int, int)} 193 */ 194 public final static int WRITE_BLOCKING = 0; 195 /** 196 * The write mode indicating the write operation will return immediately after 197 * queuing as much audio data for playback as possible without blocking, to be used in 198 * {@link #write(ByteBuffer, int, int)}. 199 */ 200 public final static int WRITE_NON_BLOCKING = 1; 201 202 //-------------------------------------------------------------------------- 203 // Member variables 204 //-------------------- 205 /** 206 * Indicates the state of the AudioTrack instance. 207 */ 208 private int mState = STATE_UNINITIALIZED; 209 /** 210 * Indicates the play state of the AudioTrack instance. 211 */ 212 private int mPlayState = PLAYSTATE_STOPPED; 213 /** 214 * Lock to make sure mPlayState updates are reflecting the actual state of the object. 215 */ 216 private final Object mPlayStateLock = new Object(); 217 /** 218 * Sizes of the native audio buffer. 219 * These values are set during construction and can be stale. 220 * To obtain the current native audio buffer frame count use {@link #getNativeFrameCount()}. 221 */ 222 private int mNativeBufferSizeInBytes = 0; 223 private int mNativeBufferSizeInFrames = 0; 224 /** 225 * Handler for events coming from the native code. 226 */ 227 private NativeEventHandlerDelegate mEventHandlerDelegate; 228 /** 229 * Looper associated with the thread that creates the AudioTrack instance. 230 */ 231 private final Looper mInitializationLooper; 232 /** 233 * The audio data source sampling rate in Hz. 
234 */ 235 private int mSampleRate; // initialized by all constructors 236 /** 237 * The number of audio output channels (1 is mono, 2 is stereo). 238 */ 239 private int mChannelCount = 1; 240 /** 241 * The audio channel mask used for calling native AudioTrack 242 */ 243 private int mChannels = AudioFormat.CHANNEL_OUT_MONO; 244 245 /** 246 * The type of the audio stream to play. See 247 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 248 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 249 * {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and 250 * {@link AudioManager#STREAM_DTMF}. 251 */ 252 private int mStreamType = AudioManager.STREAM_MUSIC; 253 254 private final AudioAttributes mAttributes; 255 /** 256 * The way audio is consumed by the audio sink, streaming or static. 257 */ 258 private int mDataLoadMode = MODE_STREAM; 259 /** 260 * The current channel position mask, as specified on AudioTrack creation. 261 * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}. 262 * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified. 263 */ 264 private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO; 265 /** 266 * The current audio channel index configuration (if specified). 267 */ 268 private int mChannelIndexMask = 0; 269 /** 270 * The encoding of the audio samples. 271 * @see AudioFormat#ENCODING_PCM_8BIT 272 * @see AudioFormat#ENCODING_PCM_16BIT 273 * @see AudioFormat#ENCODING_PCM_FLOAT 274 */ 275 private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT; 276 /** 277 * Audio session ID 278 */ 279 private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE; 280 /** 281 * Reference to the app-ops service. 
282 */ 283 private final IAppOpsService mAppOps; 284 /** 285 * HW_AV_SYNC track AV Sync Header 286 */ 287 private ByteBuffer mAvSyncHeader = null; 288 /** 289 * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header 290 */ 291 private int mAvSyncBytesRemaining = 0; 292 293 //-------------------------------- 294 // Used exclusively by native code 295 //-------------------- 296 /** 297 * Accessed by native methods: provides access to C++ AudioTrack object. 298 */ 299 @SuppressWarnings("unused") 300 private long mNativeTrackInJavaObj; 301 /** 302 * Accessed by native methods: provides access to the JNI data (i.e. resources used by 303 * the native AudioTrack object, but not stored in it). 304 */ 305 @SuppressWarnings("unused") 306 private long mJniData; 307 308 309 //-------------------------------------------------------------------------- 310 // Constructor, Finalize 311 //-------------------- 312 /** 313 * Class constructor. 314 * @param streamType the type of the audio stream. See 315 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 316 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 317 * {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}. 318 * @param sampleRateInHz the initial source sample rate expressed in Hz. 319 * @param channelConfig describes the configuration of the audio channels. 320 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 321 * {@link AudioFormat#CHANNEL_OUT_STEREO} 322 * @param audioFormat the format in which the audio data is represented. 323 * See {@link AudioFormat#ENCODING_PCM_16BIT}, 324 * {@link AudioFormat#ENCODING_PCM_8BIT}, 325 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 326 * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is 327 * read from for playback. This should be a multiple of the frame size in bytes. 
328 * <p> If the track's creation mode is {@link #MODE_STATIC}, 329 * this is the maximum length sample, or audio clip, that can be played by this instance. 330 * <p> If the track's creation mode is {@link #MODE_STREAM}, 331 * this should be the desired buffer size 332 * for the <code>AudioTrack</code> to satisfy the application's 333 * natural latency requirements. 334 * If <code>bufferSizeInBytes</code> is less than the 335 * minimum buffer size for the output sink, it is automatically increased to the minimum 336 * buffer size. 337 * The method {@link #getNativeFrameCount()} returns the 338 * actual size in frames of the native buffer created, which 339 * determines the frequency to write 340 * to the streaming <code>AudioTrack</code> to avoid underrun. 341 * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM} 342 * @throws java.lang.IllegalArgumentException 343 */ 344 public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, 345 int bufferSizeInBytes, int mode) 346 throws IllegalArgumentException { 347 this(streamType, sampleRateInHz, channelConfig, audioFormat, 348 bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE); 349 } 350 351 /** 352 * Class constructor with audio session. Use this constructor when the AudioTrack must be 353 * attached to a particular audio session. The primary use of the audio session ID is to 354 * associate audio effects to a particular instance of AudioTrack: if an audio session ID 355 * is provided when creating an AudioEffect, this effect will be applied only to audio tracks 356 * and media players in the same session and not to the output mix. 357 * When an AudioTrack is created without specifying a session, it will create its own session 358 * which can be retrieved by calling the {@link #getAudioSessionId()} method. 
359 * If a non-zero session ID is provided, this AudioTrack will share effects attached to this 360 * session 361 * with all other media players or audio tracks in the same session, otherwise a new session 362 * will be created for this track if none is supplied. 363 * @param streamType the type of the audio stream. See 364 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 365 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 366 * {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}. 367 * @param sampleRateInHz the initial source sample rate expressed in Hz. 368 * @param channelConfig describes the configuration of the audio channels. 369 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 370 * {@link AudioFormat#CHANNEL_OUT_STEREO} 371 * @param audioFormat the format in which the audio data is represented. 372 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 373 * {@link AudioFormat#ENCODING_PCM_8BIT}, 374 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 375 * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read 376 * from for playback. If using the AudioTrack in streaming mode, you can write data into 377 * this buffer in smaller chunks than this size. If using the AudioTrack in static mode, 378 * this is the maximum size of the sound that will be played for this instance. 379 * See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size 380 * for the successful creation of an AudioTrack instance in streaming mode. Using values 381 * smaller than getMinBufferSize() will result in an initialization failure. 382 * @param mode streaming or static buffer. 
See {@link #MODE_STATIC} and {@link #MODE_STREAM} 383 * @param sessionId Id of audio session the AudioTrack must be attached to 384 * @throws java.lang.IllegalArgumentException 385 */ 386 public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, 387 int bufferSizeInBytes, int mode, int sessionId) 388 throws IllegalArgumentException { 389 // mState already == STATE_UNINITIALIZED 390 this((new AudioAttributes.Builder()) 391 .setLegacyStreamType(streamType) 392 .build(), 393 (new AudioFormat.Builder()) 394 .setChannelMask(channelConfig) 395 .setEncoding(audioFormat) 396 .setSampleRate(sampleRateInHz) 397 .build(), 398 bufferSizeInBytes, 399 mode, sessionId); 400 } 401 402 /** 403 * Class constructor with {@link AudioAttributes} and {@link AudioFormat}. 404 * @param attributes a non-null {@link AudioAttributes} instance. 405 * @param format a non-null {@link AudioFormat} instance describing the format of the data 406 * that will be played through this AudioTrack. See {@link AudioFormat.Builder} for 407 * configuring the audio format parameters such as encoding, channel mask and sample rate. 408 * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read 409 * from for playback. If using the AudioTrack in streaming mode, you can write data into 410 * this buffer in smaller chunks than this size. If using the AudioTrack in static mode, 411 * this is the maximum size of the sound that will be played for this instance. 412 * See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size 413 * for the successful creation of an AudioTrack instance in streaming mode. Using values 414 * smaller than getMinBufferSize() will result in an initialization failure. 415 * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}. 
416 * @param sessionId ID of audio session the AudioTrack must be attached to, or 417 * {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction 418 * time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before 419 * construction. 420 * @throws IllegalArgumentException 421 */ 422 public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, 423 int mode, int sessionId) 424 throws IllegalArgumentException { 425 // mState already == STATE_UNINITIALIZED 426 427 if (attributes == null) { 428 throw new IllegalArgumentException("Illegal null AudioAttributes"); 429 } 430 if (format == null) { 431 throw new IllegalArgumentException("Illegal null AudioFormat"); 432 } 433 434 // remember which looper is associated with the AudioTrack instantiation 435 Looper looper; 436 if ((looper = Looper.myLooper()) == null) { 437 looper = Looper.getMainLooper(); 438 } 439 440 int rate = 0; 441 if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0) 442 { 443 rate = format.getSampleRate(); 444 } else { 445 rate = AudioSystem.getPrimaryOutputSamplingRate(); 446 if (rate <= 0) { 447 rate = 44100; 448 } 449 } 450 int channelIndexMask = 0; 451 if ((format.getPropertySetMask() 452 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) { 453 channelIndexMask = format.getChannelIndexMask(); 454 } 455 int channelMask = 0; 456 if ((format.getPropertySetMask() 457 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) { 458 channelMask = format.getChannelMask(); 459 } else if (channelIndexMask == 0) { // if no masks at all, use stereo 460 channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT 461 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT; 462 } 463 int encoding = AudioFormat.ENCODING_DEFAULT; 464 if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) { 465 encoding = format.getEncoding(); 466 } 467 audioParamCheck(rate, channelMask, 
channelIndexMask, encoding, mode); 468 mStreamType = AudioSystem.STREAM_DEFAULT; 469 470 audioBuffSizeCheck(bufferSizeInBytes); 471 472 mInitializationLooper = looper; 473 IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE); 474 mAppOps = IAppOpsService.Stub.asInterface(b); 475 476 mAttributes = (new AudioAttributes.Builder(attributes).build()); 477 478 if (sessionId < 0) { 479 throw new IllegalArgumentException("Invalid audio session ID: "+sessionId); 480 } 481 482 int[] session = new int[1]; 483 session[0] = sessionId; 484 // native initialization 485 int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes, 486 mSampleRate, mChannels, mAudioFormat, 487 mNativeBufferSizeInBytes, mDataLoadMode, session); 488 if (initResult != SUCCESS) { 489 loge("Error code "+initResult+" when initializing AudioTrack."); 490 return; // with mState == STATE_UNINITIALIZED 491 } 492 493 mSessionId = session[0]; 494 495 if (mDataLoadMode == MODE_STATIC) { 496 mState = STATE_NO_STATIC_DATA; 497 } else { 498 mState = STATE_INITIALIZED; 499 } 500 } 501 502 /** 503 * Builder class for {@link AudioTrack} objects. 504 * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio 505 * attributes and audio format parameters, you indicate which of those vary from the default 506 * behavior on the device. 
507 * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat} 508 * parameters, to be used by a new <code>AudioTrack</code> instance: 509 * 510 * <pre class="prettyprint"> 511 * AudioTrack player = new AudioTrack.Builder() 512 * .setAudioAttributes(new AudioAttributes.Builder() 513 * .setUsage(AudioAttributes.USAGE_ALARM) 514 * .setContentType(CONTENT_TYPE_MUSIC) 515 * .build()) 516 * .setAudioFormat(new AudioFormat.Builder() 517 * .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 518 * .setSampleRate(441000) 519 * .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 520 * .build()) 521 * .setBufferSize(minBuffSize) 522 * .build(); 523 * </pre> 524 * <p> 525 * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)}, 526 * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used. 527 * <br>If the audio format is not specified or is incomplete, its sample rate will be the 528 * default output sample rate of the device (see 529 * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be 530 * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be 531 * {@link AudioFormat#ENCODING_PCM_16BIT}. 532 * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)}, 533 * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used. 534 * <br>If the transfer mode is not specified with {@link #setTransferMode(int)}, 535 * <code>MODE_STREAM</code> will be used. 536 * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will 537 * be generated. 538 */ 539 public static class Builder { 540 private AudioAttributes mAttributes; 541 private AudioFormat mFormat; 542 private int mBufferSizeInBytes; 543 private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE; 544 private int mMode = MODE_STREAM; 545 546 /** 547 * Constructs a new Builder with the default values as described above. 
548 */ 549 public Builder() { 550 } 551 552 /** 553 * Sets the {@link AudioAttributes}. 554 * @param attributes a non-null {@link AudioAttributes} instance that describes the audio 555 * data to be played. 556 * @return the same Builder instance. 557 * @throws IllegalArgumentException 558 */ 559 public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes) 560 throws IllegalArgumentException { 561 if (attributes == null) { 562 throw new IllegalArgumentException("Illegal null AudioAttributes argument"); 563 } 564 // keep reference, we only copy the data when building 565 mAttributes = attributes; 566 return this; 567 } 568 569 /** 570 * Sets the format of the audio data to be played by the {@link AudioTrack}. 571 * See {@link AudioFormat.Builder} for configuring the audio format parameters such 572 * as encoding, channel mask and sample rate. 573 * @param format a non-null {@link AudioFormat} instance. 574 * @return the same Builder instance. 575 * @throws IllegalArgumentException 576 */ 577 public @NonNull Builder setAudioFormat(@NonNull AudioFormat format) 578 throws IllegalArgumentException { 579 if (format == null) { 580 throw new IllegalArgumentException("Illegal null AudioFormat argument"); 581 } 582 // keep reference, we only copy the data when building 583 mFormat = format; 584 return this; 585 } 586 587 /** 588 * Sets the total size (in bytes) of the buffer where audio data is read from for playback. 589 * If using the {@link AudioTrack} in streaming mode 590 * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller 591 * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine 592 * the minimum required buffer size for the successful creation of an AudioTrack instance 593 * in streaming mode. Using values smaller than <code>getMinBufferSize()</code> will result 594 * in an exception when trying to build the <code>AudioTrack</code>. 
595 * <br>If using the <code>AudioTrack</code> in static mode (see 596 * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be 597 * played by this instance. 598 * @param bufferSizeInBytes 599 * @return the same Builder instance. 600 * @throws IllegalArgumentException 601 */ 602 public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes) 603 throws IllegalArgumentException { 604 if (bufferSizeInBytes <= 0) { 605 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes); 606 } 607 mBufferSizeInBytes = bufferSizeInBytes; 608 return this; 609 } 610 611 /** 612 * Sets the mode under which buffers of audio data are transferred from the 613 * {@link AudioTrack} to the framework. 614 * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}. 615 * @return the same Builder instance. 616 * @throws IllegalArgumentException 617 */ 618 public @NonNull Builder setTransferMode(@TransferMode int mode) 619 throws IllegalArgumentException { 620 switch(mode) { 621 case MODE_STREAM: 622 case MODE_STATIC: 623 mMode = mode; 624 break; 625 default: 626 throw new IllegalArgumentException("Invalid transfer mode " + mode); 627 } 628 return this; 629 } 630 631 /** 632 * Sets the session ID the {@link AudioTrack} will be attached to. 633 * @param sessionId a strictly positive ID number retrieved from another 634 * <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by 635 * {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or 636 * {@link AudioManager#AUDIO_SESSION_ID_GENERATE}. 637 * @return the same Builder instance. 
638 * @throws IllegalArgumentException 639 */ 640 public @NonNull Builder setSessionId(int sessionId) 641 throws IllegalArgumentException { 642 if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) { 643 throw new IllegalArgumentException("Invalid audio session ID " + sessionId); 644 } 645 mSessionId = sessionId; 646 return this; 647 } 648 649 /** 650 * Builds an {@link AudioTrack} instance initialized with all the parameters set 651 * on this <code>Builder</code>. 652 * @return a new {@link AudioTrack} instance. 653 * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code> 654 * were incompatible, or if they are not supported by the device. 655 */ 656 public @NonNull AudioTrack build() throws UnsupportedOperationException { 657 if (mAttributes == null) { 658 mAttributes = new AudioAttributes.Builder() 659 .setUsage(AudioAttributes.USAGE_MEDIA) 660 .build(); 661 } 662 if (mFormat == null) { 663 mFormat = new AudioFormat.Builder() 664 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 665 .setSampleRate(AudioSystem.getPrimaryOutputSamplingRate()) 666 .setEncoding(AudioFormat.ENCODING_DEFAULT) 667 .build(); 668 } 669 try { 670 // If the buffer size is not specified in streaming mode, 671 // use a single frame for the buffer size and let the 672 // native code figure out the minimum buffer size. 
673 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) { 674 mBufferSizeInBytes = mFormat.getChannelCount() 675 * mFormat.getBytesPerSample(mFormat.getEncoding()); 676 } 677 return new AudioTrack(mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId); 678 } catch (IllegalArgumentException e) { 679 throw new UnsupportedOperationException(e.getMessage()); 680 } 681 } 682 } 683 684 // mask of all the channels supported by this implementation 685 private static final int SUPPORTED_OUT_CHANNELS = 686 AudioFormat.CHANNEL_OUT_FRONT_LEFT | 687 AudioFormat.CHANNEL_OUT_FRONT_RIGHT | 688 AudioFormat.CHANNEL_OUT_FRONT_CENTER | 689 AudioFormat.CHANNEL_OUT_LOW_FREQUENCY | 690 AudioFormat.CHANNEL_OUT_BACK_LEFT | 691 AudioFormat.CHANNEL_OUT_BACK_RIGHT | 692 AudioFormat.CHANNEL_OUT_BACK_CENTER | 693 AudioFormat.CHANNEL_OUT_SIDE_LEFT | 694 AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 695 696 // Java channel mask definitions below match those 697 // in /system/core/include/system/audio.h in the JNI code of AudioTrack. 698 699 // internal maximum size for bits parameter, not part of public API 700 private static final int AUDIO_CHANNEL_BITS_LOG2 = 30; 701 702 // log(2) of maximum number of representations, not part of public API 703 private static final int AUDIO_CHANNEL_REPRESENTATION_LOG2 = 2; 704 705 // used to create a channel index mask or channel position mask 706 // with getChannelMaskFromRepresentationAndBits(); 707 private static final int CHANNEL_OUT_REPRESENTATION_POSITION = 0; 708 private static final int CHANNEL_OUT_REPRESENTATION_INDEX = 2; 709 710 /** 711 * Return the channel mask from its representation and bits. 712 * 713 * This creates a channel mask for mChannels which combines a 714 * representation field and a bits field. This is for internal 715 * communication to native code, not part of the public API. 
716 * 717 * @param representation the type of channel mask, 718 * either CHANNEL_OUT_REPRESENTATION_POSITION 719 * or CHANNEL_OUT_REPRESENTATION_INDEX 720 * @param bits is the channel bits specifying occupancy 721 * @return the channel mask 722 * @throws java.lang.IllegalArgumentException if representation is not recognized or 723 * the bits field is not acceptable for that representation 724 */ 725 private static int getChannelMaskFromRepresentationAndBits(int representation, int bits) { 726 switch (representation) { 727 case CHANNEL_OUT_REPRESENTATION_POSITION: 728 case CHANNEL_OUT_REPRESENTATION_INDEX: 729 if ((bits & ~((1 << AUDIO_CHANNEL_BITS_LOG2) - 1)) != 0) { 730 throw new IllegalArgumentException("invalid bits " + bits); 731 } 732 return representation << AUDIO_CHANNEL_BITS_LOG2 | bits; 733 default: 734 throw new IllegalArgumentException("invalid representation " + representation); 735 } 736 } 737 738 // Convenience method for the constructor's parameter checks. 739 // This is where constructor IllegalArgumentException-s are thrown 740 // postconditions: 741 // mChannelCount is valid 742 // mChannels is valid 743 // mAudioFormat is valid 744 // mSampleRate is valid 745 // mDataLoadMode is valid 746 private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask, 747 int audioFormat, int mode) { 748 //-------------- 749 // sample rate, note these values are subject to change 750 if (sampleRateInHz < SAMPLE_RATE_HZ_MIN || sampleRateInHz > SAMPLE_RATE_HZ_MAX) { 751 throw new IllegalArgumentException(sampleRateInHz 752 + "Hz is not a supported sample rate."); 753 } 754 mSampleRate = sampleRateInHz; 755 756 //-------------- 757 // channel config 758 mChannelConfiguration = channelConfig; 759 760 switch (channelConfig) { 761 case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT 762 case AudioFormat.CHANNEL_OUT_MONO: 763 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 764 mChannelCount = 1; 765 mChannels = 
AudioFormat.CHANNEL_OUT_MONO; 766 break; 767 case AudioFormat.CHANNEL_OUT_STEREO: 768 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 769 mChannelCount = 2; 770 mChannels = AudioFormat.CHANNEL_OUT_STEREO; 771 break; 772 default: 773 if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) { 774 mChannelCount = 0; 775 break; // channel index configuration only 776 } 777 if (!isMultichannelConfigSupported(channelConfig)) { 778 // input channel configuration features unsupported channels 779 throw new IllegalArgumentException("Unsupported channel configuration."); 780 } 781 mChannels = channelConfig; 782 mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 783 } 784 // check the channel index configuration (if present) 785 mChannelIndexMask = channelIndexMask; 786 if (mChannelIndexMask != 0) { 787 // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2 788 final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1; 789 if ((channelIndexMask & ~indexMask) != 0) { 790 throw new IllegalArgumentException("Unsupported channel index configuration " 791 + channelIndexMask); 792 } 793 int channelIndexCount = Integer.bitCount(channelIndexMask); 794 if (mChannelCount == 0) { 795 mChannelCount = channelIndexCount; 796 } else if (mChannelCount != channelIndexCount) { 797 throw new IllegalArgumentException("Channel count must match"); 798 } 799 800 // AudioTrack prefers to use the channel index configuration 801 // over the channel position configuration if both are specified. 
802 mChannels = getChannelMaskFromRepresentationAndBits( 803 CHANNEL_OUT_REPRESENTATION_INDEX, mChannelIndexMask); 804 } 805 806 //-------------- 807 // audio format 808 if (audioFormat == AudioFormat.ENCODING_DEFAULT) { 809 audioFormat = AudioFormat.ENCODING_PCM_16BIT; 810 } 811 812 if (!AudioFormat.isValidEncoding(audioFormat)) { 813 throw new IllegalArgumentException("Unsupported audio encoding."); 814 } 815 mAudioFormat = audioFormat; 816 817 //-------------- 818 // audio load mode 819 if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) || 820 ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) { 821 throw new IllegalArgumentException("Invalid mode."); 822 } 823 mDataLoadMode = mode; 824 } 825 826 /** 827 * Convenience method to check that the channel configuration (a.k.a channel mask) is supported 828 * @param channelConfig the mask to validate 829 * @return false if the AudioTrack can't be used with such a mask 830 */ 831 private static boolean isMultichannelConfigSupported(int channelConfig) { 832 // check for unsupported channels 833 if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) { 834 loge("Channel configuration features unsupported channels"); 835 return false; 836 } 837 final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 838 if (channelCount > CHANNEL_COUNT_MAX) { 839 loge("Channel configuration contains too many channels " + 840 channelCount + ">" + CHANNEL_COUNT_MAX); 841 return false; 842 } 843 // check for unsupported multichannel combinations: 844 // - FL/FR must be present 845 // - L/R channels must be paired (e.g. 
no single L channel) 846 final int frontPair = 847 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT; 848 if ((channelConfig & frontPair) != frontPair) { 849 loge("Front channels must be present in multichannel configurations"); 850 return false; 851 } 852 final int backPair = 853 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT; 854 if ((channelConfig & backPair) != 0) { 855 if ((channelConfig & backPair) != backPair) { 856 loge("Rear channels can't be used independently"); 857 return false; 858 } 859 } 860 final int sidePair = 861 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 862 if ((channelConfig & sidePair) != 0 863 && (channelConfig & sidePair) != sidePair) { 864 loge("Side channels can't be used independently"); 865 return false; 866 } 867 return true; 868 } 869 870 871 // Convenience method for the constructor's audio buffer size check. 872 // preconditions: 873 // mChannelCount is valid 874 // mAudioFormat is valid 875 // postcondition: 876 // mNativeBufferSizeInBytes is valid (multiple of frame size, positive) 877 private void audioBuffSizeCheck(int audioBufferSize) { 878 // NB: this section is only valid with PCM data. 879 // To update when supporting compressed formats 880 int frameSizeInBytes; 881 if (AudioFormat.isEncodingLinearPcm(mAudioFormat)) { 882 frameSizeInBytes = mChannelCount 883 * (AudioFormat.getBytesPerSample(mAudioFormat)); 884 } else { 885 frameSizeInBytes = 1; 886 } 887 if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) { 888 throw new IllegalArgumentException("Invalid audio buffer size."); 889 } 890 891 mNativeBufferSizeInBytes = audioBufferSize; 892 mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes; 893 } 894 895 896 /** 897 * Releases the native AudioTrack resources. 898 */ 899 public void release() { 900 // even though native_release() stops the native AudioTrack, we need to stop 901 // AudioTrack subclasses too. 
902 try { 903 stop(); 904 } catch(IllegalStateException ise) { 905 // don't raise an exception, we're releasing the resources. 906 } 907 native_release(); 908 mState = STATE_UNINITIALIZED; 909 } 910 911 @Override 912 protected void finalize() { 913 native_finalize(); 914 } 915 916 //-------------------------------------------------------------------------- 917 // Getters 918 //-------------------- 919 /** 920 * Returns the minimum gain value, which is the constant 0.0. 921 * Gain values less than 0.0 will be clamped to 0.0. 922 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 923 * @return the minimum value, which is the constant 0.0. 924 */ 925 static public float getMinVolume() { 926 return GAIN_MIN; 927 } 928 929 /** 930 * Returns the maximum gain value, which is greater than or equal to 1.0. 931 * Gain values greater than the maximum will be clamped to the maximum. 932 * <p>The word "volume" in the API name is historical; this is actually a gain. 933 * expressed as a linear multiplier on sample values, where a maximum value of 1.0 934 * corresponds to a gain of 0 dB (sample values left unmodified). 935 * @return the maximum value, which is greater than or equal to 1.0. 936 */ 937 static public float getMaxVolume() { 938 return GAIN_MAX; 939 } 940 941 /** 942 * Returns the configured audio data sample rate in Hz 943 */ 944 public int getSampleRate() { 945 return mSampleRate; 946 } 947 948 /** 949 * Returns the current playback rate in Hz. 950 */ 951 public int getPlaybackRate() { 952 return native_get_playback_rate(); 953 } 954 955 /** 956 * Returns the configured audio data format. See {@link AudioFormat#ENCODING_PCM_16BIT} 957 * and {@link AudioFormat#ENCODING_PCM_8BIT}. 958 */ 959 public int getAudioFormat() { 960 return mAudioFormat; 961 } 962 963 /** 964 * Returns the type of audio stream this AudioTrack is configured for. 
 * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
 * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
 * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
 * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
 */
public int getStreamType() {
    return mStreamType;
}

/**
 * Returns the configured channel position mask.
 * For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
 * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
 */
public int getChannelConfiguration() {
    return mChannelConfiguration;
}

/**
 * Returns the configured number of channels.
 */
public int getChannelCount() {
    return mChannelCount;
}

/**
 * Returns the state of the AudioTrack instance. This is useful after the
 * AudioTrack instance has been created to check if it was initialized
 * properly. This ensures that the appropriate resources have been acquired.
 * @see #STATE_INITIALIZED
 * @see #STATE_NO_STATIC_DATA
 * @see #STATE_UNINITIALIZED
 */
public int getState() {
    return mState;
}

/**
 * Returns the playback state of the AudioTrack instance.
 * @see #PLAYSTATE_STOPPED
 * @see #PLAYSTATE_PAUSED
 * @see #PLAYSTATE_PLAYING
 */
public int getPlayState() {
    // mPlayState is written by play()/pause()/stop() under mPlayStateLock;
    // read it under the same lock for a consistent view.
    synchronized (mPlayStateLock) {
        return mPlayState;
    }
}

/**
 * Returns the "native frame count" of the <code>AudioTrack</code> buffer.
 * <p> If the track's creation mode is {@link #MODE_STATIC},
 * it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
 * A static track's native frame count will not change.
 * <p> If the track's creation mode is {@link #MODE_STREAM},
 * it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
 * For streaming tracks, this value may be rounded up to a larger value if needed by
 * the target output sink, and
 * if the track is subsequently routed to a different output sink, the native
 * frame count may enlarge to accommodate.
 * See also {@link AudioManager#getProperty(String)} for key
 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
 * @return current size in frames of the audio track buffer.
 * @throws IllegalStateException
 */
public int getNativeFrameCount() throws IllegalStateException {
    return native_get_native_frame_count();
}

/**
 * Returns marker position expressed in frames.
 * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
 * or zero if marker is disabled.
 */
public int getNotificationMarkerPosition() {
    return native_get_marker_pos();
}

/**
 * Returns the notification update period expressed in frames.
 * Zero means that no position update notifications are being delivered.
 */
public int getPositionNotificationPeriod() {
    return native_get_pos_update_period();
}

/**
 * Returns the playback head position expressed in frames.
 * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
 * unsigned 32-bits. That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
 * This is a continuously advancing counter. It will wrap (overflow) periodically,
 * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
 * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
 * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
 * the total number of frames played since reset,
 * <i>not</i> the current offset within the buffer.
 */
public int getPlaybackHeadPosition() {
    return native_get_position();
}

/**
 * Returns this track's estimated latency in milliseconds. This includes the latency due
 * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
 *
 * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
 * a better solution.
 * @hide
 */
public int getLatency() {
    return native_get_latency();
}

/**
 * Returns the output sample rate in Hz for the specified stream type.
 */
static public int getNativeOutputSampleRate(int streamType) {
    return native_get_output_sample_rate(streamType);
}

/**
 * Returns the minimum buffer size required for the successful creation of an AudioTrack
 * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
 * guarantee a smooth playback under load, and higher values should be chosen according to
 * the expected frequency at which the buffer will be refilled with additional data to play.
 * For example, if you intend to dynamically set the source sample rate of an AudioTrack
 * to a higher value than the initial source sample rate, be sure to configure the buffer size
 * based on the highest planned sample rate.
 * @param sampleRateInHz the source sample rate expressed in Hz.
 * @param channelConfig describes the configuration of the audio channels.
 *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
 *   {@link AudioFormat#CHANNEL_OUT_STEREO}
 * @param audioFormat the format in which the audio data is represented.
 *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
 *   {@link AudioFormat#ENCODING_PCM_8BIT},
 *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1101 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed, 1102 * or {@link #ERROR} if unable to query for output properties, 1103 * or the minimum buffer size expressed in bytes. 1104 */ 1105 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) { 1106 int channelCount = 0; 1107 switch(channelConfig) { 1108 case AudioFormat.CHANNEL_OUT_MONO: 1109 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1110 channelCount = 1; 1111 break; 1112 case AudioFormat.CHANNEL_OUT_STEREO: 1113 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1114 channelCount = 2; 1115 break; 1116 default: 1117 if (!isMultichannelConfigSupported(channelConfig)) { 1118 loge("getMinBufferSize(): Invalid channel configuration."); 1119 return ERROR_BAD_VALUE; 1120 } else { 1121 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1122 } 1123 } 1124 1125 if (!AudioFormat.isValidEncoding(audioFormat)) { 1126 loge("getMinBufferSize(): Invalid audio format."); 1127 return ERROR_BAD_VALUE; 1128 } 1129 1130 // sample rate, note these values are subject to change 1131 if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) { 1132 loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate."); 1133 return ERROR_BAD_VALUE; 1134 } 1135 1136 int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat); 1137 if (size <= 0) { 1138 loge("getMinBufferSize(): error querying hardware"); 1139 return ERROR; 1140 } 1141 else { 1142 return size; 1143 } 1144 } 1145 1146 /** 1147 * Returns the audio session ID. 1148 * 1149 * @return the ID of the audio session this AudioTrack belongs to. 1150 */ 1151 public int getAudioSessionId() { 1152 return mSessionId; 1153 } 1154 1155 /** 1156 * Poll for a timestamp on demand. 
1157 * <p> 1158 * If you need to track timestamps during initial warmup or after a routing or mode change, 1159 * you should request a new timestamp once per second until the reported timestamps 1160 * show that the audio clock is stable. 1161 * Thereafter, query for a new timestamp approximately once every 10 seconds to once per minute. 1162 * Calling this method more often is inefficient. 1163 * It is also counter-productive to call this method more often than recommended, 1164 * because the short-term differences between successive timestamp reports are not meaningful. 1165 * If you need a high-resolution mapping between frame position and presentation time, 1166 * consider implementing that at application level, based on low-resolution timestamps. 1167 * <p> 1168 * The audio data at the returned position may either already have been 1169 * presented, or may have not yet been presented but is committed to be presented. 1170 * It is not possible to request the time corresponding to a particular position, 1171 * or to request the (fractional) position corresponding to a particular time. 1172 * If you need such features, consider implementing them at application level. 1173 * 1174 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1175 * and owned by caller. 1176 * @return true if a timestamp is available, or false if no timestamp is available. 1177 * If a timestamp if available, 1178 * the AudioTimestamp instance is filled in with a position in frame units, together 1179 * with the estimated time when that frame was presented or is committed to 1180 * be presented. 1181 * In the case that no timestamp is available, any supplied instance is left unaltered. 1182 * A timestamp may be temporarily unavailable while the audio clock is stabilizing, 1183 * or during and immediately after a route change. 
1184 */ 1185 // Add this text when the "on new timestamp" API is added: 1186 // Use if you need to get the most recent timestamp outside of the event callback handler. 1187 public boolean getTimestamp(AudioTimestamp timestamp) 1188 { 1189 if (timestamp == null) { 1190 throw new IllegalArgumentException(); 1191 } 1192 // It's unfortunate, but we have to either create garbage every time or use synchronized 1193 long[] longArray = new long[2]; 1194 int ret = native_get_timestamp(longArray); 1195 if (ret != SUCCESS) { 1196 return false; 1197 } 1198 timestamp.framePosition = longArray[0]; 1199 timestamp.nanoTime = longArray[1]; 1200 return true; 1201 } 1202 1203 1204 //-------------------------------------------------------------------------- 1205 // Initialization / configuration 1206 //-------------------- 1207 /** 1208 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1209 * for each periodic playback head position update. 1210 * Notifications will be received in the same thread as the one in which the AudioTrack 1211 * instance was created. 1212 * @param listener 1213 */ 1214 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) { 1215 setPlaybackPositionUpdateListener(listener, null); 1216 } 1217 1218 /** 1219 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1220 * for each periodic playback head position update. 1221 * Use this method to receive AudioTrack events in the Handler associated with another 1222 * thread than the one in which you created the AudioTrack instance. 1223 * @param listener 1224 * @param handler the Handler that will receive the event notification messages. 
1225 */ 1226 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, 1227 Handler handler) { 1228 if (listener != null) { 1229 mEventHandlerDelegate = new NativeEventHandlerDelegate(this, listener, handler); 1230 } else { 1231 mEventHandlerDelegate = null; 1232 } 1233 } 1234 1235 1236 private static float clampGainOrLevel(float gainOrLevel) { 1237 if (Float.isNaN(gainOrLevel)) { 1238 throw new IllegalArgumentException(); 1239 } 1240 if (gainOrLevel < GAIN_MIN) { 1241 gainOrLevel = GAIN_MIN; 1242 } else if (gainOrLevel > GAIN_MAX) { 1243 gainOrLevel = GAIN_MAX; 1244 } 1245 return gainOrLevel; 1246 } 1247 1248 1249 /** 1250 * Sets the specified left and right output gain values on the AudioTrack. 1251 * <p>Gain values are clamped to the closed interval [0.0, max] where 1252 * max is the value of {@link #getMaxVolume}. 1253 * A value of 0.0 results in zero gain (silence), and 1254 * a value of 1.0 means unity gain (signal unchanged). 1255 * The default value is 1.0 meaning unity gain. 1256 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1257 * @param leftGain output gain for the left channel. 1258 * @param rightGain output gain for the right channel 1259 * @return error code or success, see {@link #SUCCESS}, 1260 * {@link #ERROR_INVALID_OPERATION} 1261 * @deprecated Applications should use {@link #setVolume} instead, as it 1262 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 1263 */ 1264 public int setStereoVolume(float leftGain, float rightGain) { 1265 if (isRestricted()) { 1266 return SUCCESS; 1267 } 1268 if (mState == STATE_UNINITIALIZED) { 1269 return ERROR_INVALID_OPERATION; 1270 } 1271 1272 leftGain = clampGainOrLevel(leftGain); 1273 rightGain = clampGainOrLevel(rightGain); 1274 1275 native_setVolume(leftGain, rightGain); 1276 1277 return SUCCESS; 1278 } 1279 1280 1281 /** 1282 * Sets the specified output gain value on all channels of this track. 
1283 * <p>Gain values are clamped to the closed interval [0.0, max] where 1284 * max is the value of {@link #getMaxVolume}. 1285 * A value of 0.0 results in zero gain (silence), and 1286 * a value of 1.0 means unity gain (signal unchanged). 1287 * The default value is 1.0 meaning unity gain. 1288 * <p>This API is preferred over {@link #setStereoVolume}, as it 1289 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 1290 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1291 * @param gain output gain for all channels. 1292 * @return error code or success, see {@link #SUCCESS}, 1293 * {@link #ERROR_INVALID_OPERATION} 1294 */ 1295 public int setVolume(float gain) { 1296 return setStereoVolume(gain, gain); 1297 } 1298 1299 1300 /** 1301 * Sets the playback sample rate for this track. This sets the sampling rate at which 1302 * the audio data will be consumed and played back 1303 * (as set by the sampleRateInHz parameter in the 1304 * {@link #AudioTrack(int, int, int, int, int, int)} constructor), 1305 * not the original sampling rate of the 1306 * content. For example, setting it to half the sample rate of the content will cause the 1307 * playback to last twice as long, but will also result in a pitch shift down by one octave. 1308 * The valid sample rate range is from 1 Hz to twice the value returned by 1309 * {@link #getNativeOutputSampleRate(int)}. 1310 * @param sampleRateInHz the sample rate expressed in Hz 1311 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1312 * {@link #ERROR_INVALID_OPERATION} 1313 */ 1314 public int setPlaybackRate(int sampleRateInHz) { 1315 if (mState != STATE_INITIALIZED) { 1316 return ERROR_INVALID_OPERATION; 1317 } 1318 if (sampleRateInHz <= 0) { 1319 return ERROR_BAD_VALUE; 1320 } 1321 return native_set_playback_rate(sampleRateInHz); 1322 } 1323 1324 1325 /** 1326 * Sets the position of the notification marker. 
 * At most one marker can be active.
 * @param markerInFrames marker position in wrapping frame units similar to
 * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
 * To set a marker at a position which would appear as zero due to wraparound,
 * a workaround is to use a non-zero position near zero, such as -1 or 1.
 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
 *  {@link #ERROR_INVALID_OPERATION}
 */
public int setNotificationMarkerPosition(int markerInFrames) {
    if (mState == STATE_UNINITIALIZED) {
        return ERROR_INVALID_OPERATION;
    }
    // negative/wrapped values are passed through; the native layer owns the marker
    return native_set_marker_pos(markerInFrames);
}


/**
 * Sets the period for the periodic notification event.
 * @param periodInFrames update period expressed in frames.
 * Zero period means no position updates.  A negative period is not allowed.
 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
 */
public int setPositionNotificationPeriod(int periodInFrames) {
    if (mState == STATE_UNINITIALIZED) {
        return ERROR_INVALID_OPERATION;
    }
    return native_set_pos_update_period(periodInFrames);
}


/**
 * Sets the playback head position within the static buffer.
 * The track must be stopped or paused for the position to be changed,
 * and must use the {@link #MODE_STATIC} mode.
 * @param positionInFrames playback head position within buffer, expressed in frames.
 * Zero corresponds to start of buffer.
 * The position must not be greater than the buffer size in frames, or negative.
 * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
 * the position values have different meanings.
 * <br>
 * If looping is currently enabled and the new position is greater than or equal to the
 * loop end marker, the behavior varies by API level: for API level 22 and above,
 * the looping is first disabled and then the position is set.
 * For earlier API levels, the behavior is unspecified.
 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
 *    {@link #ERROR_INVALID_OPERATION}
 */
public int setPlaybackHeadPosition(int positionInFrames) {
    // only valid for a stopped/paused static track
    if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
            getPlayState() == PLAYSTATE_PLAYING) {
        return ERROR_INVALID_OPERATION;
    }
    // position must lie within [0, buffer size in frames]
    if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
        return ERROR_BAD_VALUE;
    }
    return native_set_position(positionInFrames);
}

/**
 * Sets the loop points and the loop count. The loop can be infinite.
 * Similarly to setPlaybackHeadPosition,
 * the track must be stopped or paused for the loop points to be changed,
 * and must use the {@link #MODE_STATIC} mode.
 * @param startInFrames loop start marker expressed in frames.
 * Zero corresponds to start of buffer.
 * The start marker must not be greater than or equal to the buffer size in frames, or negative.
 * @param endInFrames loop end marker expressed in frames.
 * The total buffer size in frames corresponds to end of buffer.
 * The end marker must not be greater than the buffer size in frames.
 * For looping, the end marker must not be less than or equal to the start marker,
 * but to disable looping
 * it is permitted for start marker, end marker, and loop count to all be 0.
 * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
 * If the loop period (endInFrames - startInFrames) is too small for the implementation to
 * support,
 * {@link #ERROR_BAD_VALUE} is returned.
 * The loop range is the interval [startInFrames, endInFrames).
 * <br>
 * For API level 22 and above, the position is left unchanged,
 * unless it is greater than or equal to the loop end marker, in which case
 * it is forced to the loop start marker.
 * For earlier API levels, the effect on position is unspecified.
 * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
 *    A value of -1 means infinite looping, and 0 disables looping.
 *    A value of positive N means to "loop" (go back) N times.  For example,
 *    a value of one means to play the region two times in total.
 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
 *    {@link #ERROR_INVALID_OPERATION}
 */
public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
    // only valid for a stopped/paused static track
    if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
            getPlayState() == PLAYSTATE_PLAYING) {
        return ERROR_INVALID_OPERATION;
    }
    if (loopCount == 0) {
        ;   // explicitly allowed as an exception to the loop region range check
    } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
            startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
        return ERROR_BAD_VALUE;
    }
    return native_set_loop(startInFrames, endInFrames, loopCount);
}

/**
 * Sets the initialization state of the instance. This method was originally intended to be used
 * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
 * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
 * @param state the state of the AudioTrack instance
 * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
 */
@Deprecated
protected void setState(int state) {
    mState = state;
}


//---------------------------------------------------------
// Transport control methods
//--------------------
/**
 * Starts playing an AudioTrack.
 * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
 * the {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
 * or {@link #write(float[], int, int, int)} methods.
 * If the mode is {@link #MODE_STREAM}, you can optionally prime the
 * output buffer by writing up to bufferSizeInBytes (from constructor) before starting.
 * This priming will avoid an immediate underrun, but is not required.
 *
 * @throws IllegalStateException
 */
public void play()
throws IllegalStateException {
    if (mState != STATE_INITIALIZED) {
        throw new IllegalStateException("play() called on uninitialized AudioTrack.");
    }
    if (isRestricted()) {
        // playback not allowed by app ops: start anyway, but silently (gain 0)
        setVolume(0);
    }
    synchronized(mPlayStateLock) {
        native_start();
        mPlayState = PLAYSTATE_PLAYING;
    }
}

// Returns true when app-ops denies OP_PLAY_AUDIO for this track's usage,
// unless the track bypasses the interruption policy.
private boolean isRestricted() {
    if ((mAttributes.getFlags() & AudioAttributes.FLAG_BYPASS_INTERRUPTION_POLICY) != 0) {
        return false;
    }
    try {
        final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType);
        final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage,
                Process.myUid(), ActivityThread.currentPackageName());
        return mode != AppOpsManager.MODE_ALLOWED;
    } catch (RemoteException e) {
        // can't reach the app ops service: err on the side of allowing playback
        return false;
    }
}

/**
 * Stops playing the audio data.
 * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
 * after the last buffer that was written has been played. For an immediate stop, use
 * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
 * back yet.
 * @throws IllegalStateException
 */
public void stop()
throws IllegalStateException {
    if (mState != STATE_INITIALIZED) {
        throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
    }

    // stop playing
    synchronized(mPlayStateLock) {
        native_stop();
        mPlayState = PLAYSTATE_STOPPED;
        // discard any pending A/V sync header state along with the queued data
        mAvSyncHeader = null;
        mAvSyncBytesRemaining = 0;
    }
}

/**
 * Pauses the playback of the audio data. Data that has not been played
 * back will not be discarded. Subsequent calls to {@link #play} will play
 * this data back. See {@link #flush()} to discard this data.
 *
 * @throws IllegalStateException
 */
public void pause()
throws IllegalStateException {
    if (mState != STATE_INITIALIZED) {
        throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
    }
    //logd("pause()");

    // pause playback
    synchronized(mPlayStateLock) {
        native_pause();
        mPlayState = PLAYSTATE_PAUSED;
    }
}


//---------------------------------------------------------
// Audio data supply
//--------------------

/**
 * Flushes the audio data currently queued for playback. Any data that has
 * been written but not yet presented will be discarded.  No-op if not stopped or paused,
 * or if the track's creation mode is not {@link #MODE_STREAM}.
 * <BR> Note that although data written but not yet presented is discarded, there is no
 * guarantee that all of the buffer space formerly used by that data
 * is available for a subsequent write.
 * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
 * less than or equal to the total buffer size
 * may return a short actual transfer count.
 */
public void flush() {
    if (mState == STATE_INITIALIZED) {
        // flush the data in native layer
        native_flush();
        // discard any pending A/V sync header state along with the queued data
        mAvSyncHeader = null;
        mAvSyncBytesRemaining = 0;
    }

}

/**
 * Writes the audio data to the audio sink for playback (streaming mode),
 * or copies audio data for later playback (static buffer mode).
 * The format specified in the AudioTrack constructor should be
 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
 * In streaming mode, will block until all data has been written to the audio sink.
 * In static buffer mode, copies the data to the buffer starting at offset 0.
 * Note that the actual playback of this data might occur after this function
 * returns. This function is thread safe with respect to {@link #stop} calls,
 * in which case all of the specified data might not be written to the audio sink.
 *
 * @param audioData the array that holds the data to play.
 * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
 *    starts.
 * @param sizeInBytes the number of bytes to read in audioData after the offset.
 * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
 *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
 *    the parameters don't resolve to valid data and indexes, or
 *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
 *    needs to be recreated.
 */
public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
    // convenience overload: delegate to the blocking variant
    return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
}

/**
 * Writes the audio data to the audio sink for playback (streaming mode),
 * or copies audio data for later playback (static buffer mode).
 * The format specified in the AudioTrack constructor should be
 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
 * In streaming mode, will block until all data has been written to the audio sink.
 * In static buffer mode, copies the data to the buffer starting at offset 0.
 * Note that the actual playback of this data might occur after this function
 * returns. This function is thread safe with respect to {@link #stop} calls,
 * in which case all of the specified data might not be written to the audio sink.
 *
 * @param audioData the array that holds the data to play.
 * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
 *    starts.
 * @param sizeInBytes the number of bytes to read in audioData after the offset.
 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
 *    effect in static mode.
 *    <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
 *         to the audio sink.
 *    <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
 *    queuing as much audio data for playback as possible without blocking.
 * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
 *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
 *    the parameters don't resolve to valid data and indexes, or
 *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
 *    needs to be recreated.
 */
public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
        @WriteMode int writeMode) {

    // byte[] writes are not valid for float tracks; use the float[] overload instead
    if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
        return ERROR_INVALID_OPERATION;
    }

    if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
        Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
        return ERROR_BAD_VALUE;
    }

    if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
            || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
            || (offsetInBytes + sizeInBytes > audioData.length)) {
        return ERROR_BAD_VALUE;
    }

    int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
            writeMode == WRITE_BLOCKING);

    // a successful first write to a static track marks it fully initialized
    if ((mDataLoadMode == MODE_STATIC)
            && (mState == STATE_NO_STATIC_DATA)
            && (ret > 0)) {
        // benign race with respect to other APIs that read mState
        mState = STATE_INITIALIZED;
    }

    return ret;
}

/**
 * Writes the audio data to the audio sink for playback (streaming mode),
 * or copies audio data for later playback (static buffer mode).
 * The format specified in the AudioTrack constructor should be
 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
 * In streaming mode, will block until all data has been written to the audio sink.
 * In static buffer mode, copies the data to the buffer starting at offset 0.
 * Note that the actual playback of this data might occur after this function
 * returns. This function is thread safe with respect to {@link #stop} calls,
 * in which case all of the specified data might not be written to the audio sink.
 *
 * @param audioData the array that holds the data to play.
 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
 *     starts.
1651 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1652 * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION} 1653 * if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1654 * the parameters don't resolve to valid data and indexes, or 1655 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1656 * needs to be recreated. 1657 */ 1658 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) { 1659 return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING); 1660 } 1661 1662 /** 1663 * Writes the audio data to the audio sink for playback (streaming mode), 1664 * or copies audio data for later playback (static buffer mode). 1665 * The format specified in the AudioTrack constructor should be 1666 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 1667 * In streaming mode, will block until all data has been written to the audio sink. 1668 * In static buffer mode, copies the data to the buffer starting at offset 0. 1669 * Note that the actual playback of this data might occur after this function 1670 * returns. This function is thread safe with respect to {@link #stop} calls, 1671 * in which case all of the specified data might not be written to the audio sink. 1672 * 1673 * @param audioData the array that holds the data to play. 1674 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 1675 * starts. 1676 * @param sizeInShorts the number of shorts to read in audioData after the offset. 1677 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1678 * effect in static mode. 1679 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1680 * to the audio sink. 
1681 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1682 * queuing as much audio data for playback as possible without blocking. 1683 * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION} 1684 * if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1685 * the parameters don't resolve to valid data and indexes, or 1686 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1687 * needs to be recreated. 1688 */ 1689 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts, 1690 @WriteMode int writeMode) { 1691 1692 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 1693 return ERROR_INVALID_OPERATION; 1694 } 1695 1696 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1697 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1698 return ERROR_BAD_VALUE; 1699 } 1700 1701 if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0) 1702 || (offsetInShorts + sizeInShorts < 0) // detect integer overflow 1703 || (offsetInShorts + sizeInShorts > audioData.length)) { 1704 return ERROR_BAD_VALUE; 1705 } 1706 1707 int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat, 1708 writeMode == WRITE_BLOCKING); 1709 1710 if ((mDataLoadMode == MODE_STATIC) 1711 && (mState == STATE_NO_STATIC_DATA) 1712 && (ret > 0)) { 1713 // benign race with respect to other APIs that read mState 1714 mState = STATE_INITIALIZED; 1715 } 1716 1717 return ret; 1718 } 1719 1720 /** 1721 * Writes the audio data to the audio sink for playback (streaming mode), 1722 * or copies audio data for later playback (static buffer mode). 1723 * The format specified in the AudioTrack constructor should be 1724 * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array. 
1725 * In static buffer mode, copies the data to the buffer starting at offset 0, 1726 * and the write mode is ignored. 1727 * In streaming mode, the blocking behavior will depend on the write mode. 1728 * <p> 1729 * Note that the actual playback of this data might occur after this function 1730 * returns. This function is thread safe with respect to {@link #stop} calls, 1731 * in which case all of the specified data might not be written to the audio sink. 1732 * <p> 1733 * @param audioData the array that holds the data to play. 1734 * The implementation does not clip for sample values within the nominal range 1735 * [-1.0f, 1.0f], provided that all gains in the audio pipeline are 1736 * less than or equal to unity (1.0f), and in the absence of post-processing effects 1737 * that could add energy, such as reverb. For the convenience of applications 1738 * that compute samples using filters with non-unity gain, 1739 * sample values +3 dB beyond the nominal range are permitted. 1740 * However such values may eventually be limited or clipped, depending on various gains 1741 * and later processing in the audio path. Therefore applications are encouraged 1742 * to provide samples values within the nominal range. 1743 * @param offsetInFloats the offset, expressed as a number of floats, 1744 * in audioData where the data to play starts. 1745 * @param sizeInFloats the number of floats to read in audioData after the offset. 1746 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1747 * effect in static mode. 1748 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1749 * to the audio sink. 1750 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1751 * queuing as much audio data for playback as possible without blocking. 
1752 * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION} 1753 * if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if 1754 * the parameters don't resolve to valid data and indexes, or 1755 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1756 * needs to be recreated. 1757 */ 1758 public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats, 1759 @WriteMode int writeMode) { 1760 1761 if (mState == STATE_UNINITIALIZED) { 1762 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 1763 return ERROR_INVALID_OPERATION; 1764 } 1765 1766 if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) { 1767 Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT"); 1768 return ERROR_INVALID_OPERATION; 1769 } 1770 1771 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1772 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1773 return ERROR_BAD_VALUE; 1774 } 1775 1776 if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0) 1777 || (offsetInFloats + sizeInFloats < 0) // detect integer overflow 1778 || (offsetInFloats + sizeInFloats > audioData.length)) { 1779 Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size"); 1780 return ERROR_BAD_VALUE; 1781 } 1782 1783 int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat, 1784 writeMode == WRITE_BLOCKING); 1785 1786 if ((mDataLoadMode == MODE_STATIC) 1787 && (mState == STATE_NO_STATIC_DATA) 1788 && (ret > 0)) { 1789 // benign race with respect to other APIs that read mState 1790 mState = STATE_INITIALIZED; 1791 } 1792 1793 return ret; 1794 } 1795 1796 1797 /** 1798 * Writes the audio data to the audio sink for playback (streaming mode), 1799 * or copies audio data for later playback (static buffer mode). 
1800 * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write 1801 * mode is ignored. 1802 * In streaming mode, the blocking behavior will depend on the write mode. 1803 * @param audioData the buffer that holds the data to play, starting at the position reported 1804 * by <code>audioData.position()</code>. 1805 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 1806 * have been advanced to reflect the amount of data that was successfully written to 1807 * the AudioTrack. 1808 * @param sizeInBytes number of bytes to write. 1809 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 1810 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 1811 * effect in static mode. 1812 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 1813 * to the audio sink. 1814 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 1815 * queuing as much audio data for playback as possible without blocking. 1816 * @return 0 or a positive number of bytes that were written, or 1817 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or 1818 * {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1819 * needs to be recreated. 
1820 */ 1821 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 1822 @WriteMode int writeMode) { 1823 1824 if (mState == STATE_UNINITIALIZED) { 1825 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 1826 return ERROR_INVALID_OPERATION; 1827 } 1828 1829 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 1830 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 1831 return ERROR_BAD_VALUE; 1832 } 1833 1834 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 1835 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 1836 return ERROR_BAD_VALUE; 1837 } 1838 1839 int ret = 0; 1840 if (audioData.isDirect()) { 1841 ret = native_write_native_bytes(audioData, 1842 audioData.position(), sizeInBytes, mAudioFormat, 1843 writeMode == WRITE_BLOCKING); 1844 } else { 1845 ret = native_write_byte(NioUtils.unsafeArray(audioData), 1846 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 1847 sizeInBytes, mAudioFormat, 1848 writeMode == WRITE_BLOCKING); 1849 } 1850 1851 if ((mDataLoadMode == MODE_STATIC) 1852 && (mState == STATE_NO_STATIC_DATA) 1853 && (ret > 0)) { 1854 // benign race with respect to other APIs that read mState 1855 mState = STATE_INITIALIZED; 1856 } 1857 1858 if (ret > 0) { 1859 audioData.position(audioData.position() + ret); 1860 } 1861 1862 return ret; 1863 } 1864 1865 /** 1866 * Writes the audio data to the audio sink for playback (streaming mode) on a HW_AV_SYNC track. 1867 * In streaming mode, the blocking behavior will depend on the write mode. 1868 * @param audioData the buffer that holds the data to play, starting at the position reported 1869 * by <code>audioData.position()</code>. 1870 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 1871 * have been advanced to reflect the amount of data that was successfully written to 1872 * the AudioTrack. 
     * @param sizeInBytes number of bytes to write.
     * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
     * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *     to the audio sink.
     * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
     * @return 0 or a positive number of bytes that were written, or
     *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
     *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *     needs to be recreated.
     */
    public int write(ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        // Tracks created without FLAG_HW_AV_SYNC cannot consume the timestamp;
        // fall back to the plain ByteBuffer write and drop the pts.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists
        // Header layout (16 bytes, big-endian): marker 0x55550001, payload size
        // in bytes, then the 64-bit timestamp for this access unit.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(16);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550001);
            mAvSyncHeader.putInt(sizeInBytes);
            mAvSyncHeader.putLong(timestamp);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // A non-blocking write may queue only part of the header;
                // report 0 payload bytes written and resume on the next call.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more payload than the current header declared.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        // Once the declared payload is fully written, a fresh header is
        // required for the next access unit.
        // NOTE(review): mAvSyncHeader/mAvSyncBytesRemaining are also cleared by
        // stop() (under mPlayStateLock) and flush() — no common lock is taken
        // here, so concurrent use appears racy; confirm the threading model.
        mAvSyncBytesRemaining -= ret;
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader = null;
        }

        return ret;
    }


    /**
     * Sets the playback head position within the static buffer to zero,
     * that is it rewinds to start of static buffer.
     * The track must be stopped or paused, and
     * the track's creation mode must be {@link #MODE_STATIC}.
1950 * <p> 1951 * For API level 22 and above, also resets the value returned by 1952 * {@link #getPlaybackHeadPosition()} to zero. 1953 * For earlier API levels, the reset behavior is unspecified. 1954 * <p> 1955 * {@link #setPlaybackHeadPosition(int)} to zero 1956 * is recommended instead when the reset of {@link #getPlaybackHeadPosition} is not needed. 1957 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 1958 * {@link #ERROR_INVALID_OPERATION} 1959 */ 1960 public int reloadStaticData() { 1961 if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) { 1962 return ERROR_INVALID_OPERATION; 1963 } 1964 return native_reload_static(); 1965 } 1966 1967 //-------------------------------------------------------------------------- 1968 // Audio effects management 1969 //-------------------- 1970 1971 /** 1972 * Attaches an auxiliary effect to the audio track. A typical auxiliary 1973 * effect is a reverberation effect which can be applied on any sound source 1974 * that directs a certain amount of its energy to this effect. This amount 1975 * is defined by setAuxEffectSendLevel(). 1976 * {@see #setAuxEffectSendLevel(float)}. 1977 * <p>After creating an auxiliary effect (e.g. 1978 * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with 1979 * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling 1980 * this method to attach the audio track to the effect. 1981 * <p>To detach the effect from the audio track, call this method with a 1982 * null effect id. 
     *
     * @param effectId system wide unique id of the effect to attach
     * @return error code or success, see {@link #SUCCESS},
     *     {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
     */
    public int attachAuxEffect(int effectId) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_attachAuxEffect(effectId);
    }

    /**
     * Sets the send level of the audio track to the attached auxiliary effect
     * {@link #attachAuxEffect(int)}. Effect levels
     * are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
     * this method must be called for the effect to be applied.
     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
     * so an appropriate conversion from linear UI input x to level is:
     * x == 0 -&gt; level = 0
     * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
     *
     * @param level linear send level
     * @return error code or success, see {@link #SUCCESS},
     *     {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
     */
    public int setAuxEffectSendLevel(float level) {
        // A restricted track reports success without forwarding the level to
        // the native layer (the request is accepted but has no effect).
        if (isRestricted()) {
            return SUCCESS;
        }
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        // Clamp to the legal gain range before crossing into native code.
        level = clampGainOrLevel(level);
        int err = native_setAuxEffectSendLevel(level);
        return err == 0 ? SUCCESS : ERROR;
    }

    //--------------------------------------------------------------------------
    // Explicit Routing
    //--------------------
    // Last device explicitly requested via setPreferredOutputDevice(), or null
    // when default routing is in effect.
    private AudioDeviceInfo mPreferredDevice = null;

    /**
     * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
     * the output from this AudioTrack.
     * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
     *  If deviceInfo is null, default routing is restored.
     * @return true if successful, false if the specified {@link AudioDeviceInfo} is non-null and
     * does not correspond to a valid audio output device.
     */
    public boolean setPreferredOutputDevice(AudioDeviceInfo deviceInfo) {
        // Do some validation.... a non-null device must be an output (sink).
        if (deviceInfo != null && !deviceInfo.isSink()) {
            return false;
        }

        mPreferredDevice = deviceInfo;
        // Id 0 corresponds to the null (default routing) case.
        int preferredDeviceId = mPreferredDevice != null ? deviceInfo.getId() : 0;

        return native_setOutputDevice(preferredDeviceId);
    }

    /**
     * Returns the selected output device specified by {@link #setPreferredOutputDevice}. Note
     * that this is not guaranteed to correspond to the actual device being used for playback.
     */
    public AudioDeviceInfo getPreferredOutputDevice() {
        return mPreferredDevice;
    }

    //---------------------------------------------------------
    // Interface definitions
    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         */
        void onPeriodicNotification(AudioTrack track);
    }

    //---------------------------------------------------------
    // Inner classes
    //--------------------
    /**
     * Helper class to handle the forwarding of native events to the appropriate listener
     * (potentially) handled in a different thread
     */
    private class NativeEventHandlerDelegate {
        // Handler that dispatches marker/period callbacks, or null if no
        // usable Looper was available at construction time.
        private final Handler mHandler;

        NativeEventHandlerDelegate(final AudioTrack track,
                final OnPlaybackPositionUpdateListener listener,
                Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                        case NATIVE_EVENT_MARKER:
                            if (listener != null) {
                                listener.onMarkerReached(track);
                            }
                            break;
                        case NATIVE_EVENT_NEW_POS:
                            if (listener != null) {
                                listener.onPeriodicNotification(track);
                            }
                            break;
                        default:
                            loge("Unknown native event type: " + msg.what);
                            break;
                        }
                    }
                };
            } else {
                // No handler and no initialization looper: events are dropped.
                mHandler = null;
            }
        }

        Handler getHandler() {
            return mHandler;
        }
    }


    //---------------------------------------------------------
    // Java methods called from the native side
    //--------------------
    @SuppressWarnings("unused")
    private static void postEventFromNative(Object audiotrack_ref,
            int what, int arg1, int arg2, Object obj) {
        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
        // The native layer holds only a WeakReference to the Java AudioTrack so
        // it cannot keep a released track alive; a cleared reference means the
        // event can simply be dropped.
        AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
        if (track == null) {
            return;
        }

        NativeEventHandlerDelegate delegate = track.mEventHandlerDelegate;
        if (delegate != null) {
            Handler handler = delegate.getHandler();
            if (handler != null) {
                // Forward the event to the listener's thread via its Handler.
                Message m = handler.obtainMessage(what, arg1, arg2, obj);
                handler.sendMessage(m);
            }
        }

    }


    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    // post-condition: mStreamType is overwritten with a value
    // that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    // AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int sampleRate, int channelMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    private native final void native_finalize();

    private native final void native_release();

    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    private native final int native_write_byte(byte[] audioData,
            int offsetInBytes, int sizeInBytes, int format,
            boolean isBlocking);

    private native final int native_write_short(short[] audioData,
            int offsetInShorts, int sizeInShorts, int format,
            boolean isBlocking);

    private native final int native_write_float(float[] audioData,
            int offsetInFloats, int sizeInFloats, int format,
            boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    private native final boolean native_setOutputDevice(int deviceId);

    //---------------------------------------------------------
    // Utility methods
    //------------------

    // Logs a debug message under the class tag.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Logs an error message under the class tag.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
}