1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import android.annotation.CallbackExecutor;
20 import android.annotation.FloatRange;
21 import android.annotation.IntDef;
22 import android.annotation.IntRange;
23 import android.annotation.NonNull;
24 import android.annotation.Nullable;
25 import android.annotation.RequiresPermission;
26 import android.annotation.SystemApi;
27 import android.annotation.TestApi;
28 import android.compat.annotation.UnsupportedAppUsage;
29 import android.media.metrics.LogSessionId;
30 import android.os.Binder;
31 import android.os.Build;
32 import android.os.Handler;
33 import android.os.HandlerThread;
34 import android.os.Looper;
35 import android.os.Message;
36 import android.os.PersistableBundle;
37 import android.util.ArrayMap;
38 import android.util.Log;
39 
40 import com.android.internal.annotations.GuardedBy;
41 
42 import java.lang.annotation.Retention;
43 import java.lang.annotation.RetentionPolicy;
44 import java.lang.ref.WeakReference;
45 import java.nio.ByteBuffer;
46 import java.nio.ByteOrder;
47 import java.nio.NioUtils;
48 import java.util.HashMap;
49 import java.util.LinkedList;
50 import java.util.Objects;
51 import java.util.concurrent.Executor;
52 
53 /**
54  * The AudioTrack class manages and plays a single audio resource for Java applications.
55  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
56  * achieved by "pushing" the data to the AudioTrack object using one of the
57  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
58  *  and {@link #write(float[], int, int, int)} methods.
59  *
60  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
61  * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
62  * one of the {@code write()} methods. These are blocking and return when the data has been
63  * transferred from the Java layer to the native layer and queued for playback. The streaming
64  * mode is most useful when playing blocks of audio data that for instance are:
65  *
66  * <ul>
67  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
68  *   <li>too big to fit in memory because of the characteristics of the audio data
69  *         (high sampling rate, bits per sample ...)</li>
70  *   <li>received or generated while previously queued audio is playing.</li>
71  * </ul>
72  *
73  * The static mode should be chosen when dealing with short sounds that fit in memory and
74  * that need to be played with the smallest latency possible. The static mode will
75  * therefore be preferred for UI and game sounds that are played often, and with the
76  * smallest overhead possible.
77  *
78  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
79  * The size of this buffer, specified during the construction, determines how long an AudioTrack
80  * can play before running out of data.<br>
81  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
82  * be played from it.<br>
83  * For the streaming mode, data will be written to the audio sink in chunks of
84  * sizes less than or equal to the total buffer size.
85  *
86  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
87  */
88 public class AudioTrack extends PlayerBase
89                         implements AudioRouting
90                                  , VolumeAutomation
91 {
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    // Play state constants; values mirror the OpenSL ES SL_PLAYSTATE_* values.
    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
    /**
      * @hide
      * indicates AudioTrack state is stopping waiting for NATIVE_EVENT_STREAM_END to
      * transition to PLAYSTATE_STOPPED.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_STOPPING = 4;
    /**
      * @hide
      * indicates AudioTrack state is paused from stopping state. Will transition to
      * PLAYSTATE_STOPPING if play() is called.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_PAUSED_STOPPING = 5;

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}
144 
    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    // Note: To avoid collisions with other event constants,
    // do not define an event here that is the same value as
    // AudioSystem.NATIVE_EVENT_ROUTING_CHANGE.

    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;
    /**
     * Event id denotes when more data can be written (callback for more data).
     */
    private static final int NATIVE_EVENT_CAN_WRITE_MORE_DATA = 9;
    /**
     * IAudioTrack tear down for offloaded tracks
     * TODO: when received, java AudioTrack must be released
     */
    private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6;
    /**
     * Event id denotes when all the buffers queued in AF and HW are played
     * back (after stop is called) for an offloaded track.
     */
    private static final int NATIVE_EVENT_STREAM_END = 7;
    /**
     * Event id denotes when the codec format changes.
     *
     * Note: Similar to a device routing change (AudioSystem.NATIVE_EVENT_ROUTING_CHANGE),
     * this event comes from the AudioFlinger Thread / Output Stream management
     * (not from buffer indications as above).
     */
    private static final int NATIVE_EVENT_CODEC_FORMAT_CHANGE = 100;

    private final static String TAG = "android.media.AudioTrack";
233 
    /** @hide */
    @IntDef({
        ENCAPSULATION_MODE_NONE,
        ENCAPSULATION_MODE_ELEMENTARY_STREAM,
        // ENCAPSULATION_MODE_HANDLE, @SystemApi
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMode {}

    // Important: The ENCAPSULATION_MODE values must be kept in sync with native header files.
    /**
     * This mode indicates no metadata encapsulation,
     * which is the default mode for sending audio data
     * through {@code AudioTrack}.
     */
    public static final int ENCAPSULATION_MODE_NONE = 0;
    /**
     * This mode indicates metadata encapsulation with an elementary stream payload.
     * Both compressed and PCM formats are allowed.
     */
    public static final int ENCAPSULATION_MODE_ELEMENTARY_STREAM = 1;
    /**
     * This mode indicates metadata encapsulation with a handle payload
     * and is set through {@link Builder#setEncapsulationMode(int)}.
     * The handle is a 64 bit long, provided by the Tuner API
     * in {@link android.os.Build.VERSION_CODES#R}.
     * @hide
     */
    @SystemApi
    @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
    public static final int ENCAPSULATION_MODE_HANDLE = 2;

    /* Enumeration of metadata types permitted for use by
     * encapsulation mode audio streams.
     */
    /** @hide */
    @IntDef(prefix = { "ENCAPSULATION_METADATA_TYPE_" }, value = {
        ENCAPSULATION_METADATA_TYPE_NONE, /* reserved */
        ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER,
        ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMetadataType {}

    /**
     * Reserved; do not use.
     * @hide
     */
    public static final int ENCAPSULATION_METADATA_TYPE_NONE = 0; // reserved

    /**
     * Encapsulation metadata type for framework tuner information.
     *
     * Refer to the Android Media TV Tuner API for details.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER = 1;

    /**
     * Encapsulation metadata type for DVB AD descriptor.
     *
     * This metadata is formatted per ETSI TS 101 154 Table E.1: AD_descriptor.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR = 2;
297 
    /* Dual Mono handling is used when a stereo audio stream
     * contains separate audio content on the left and right channels.
     * Such information about the content of the stream may be found, for example, in
     * ITU T-REC-J.94-201610 A.6.2.3 Component descriptor.
     */
    /** @hide */
    @IntDef({
        DUAL_MONO_MODE_OFF,
        DUAL_MONO_MODE_LR,
        DUAL_MONO_MODE_LL,
        DUAL_MONO_MODE_RR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface DualMonoMode {}
    // Important: The DUAL_MONO_MODE values must be kept in sync with native header files.
    /**
     * This mode disables any Dual Mono presentation effect.
     */
    public static final int DUAL_MONO_MODE_OFF = 0;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left and right audio channels blended together
     * and delivered to both channels.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that the left-right stereo symmetric
     * channels are pairwise blended;
     * the other channels such as center are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LR = 1;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left audio channel replicated into the right audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the left channel position replicated
     * into the right channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LL = 2;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the right audio channel replicated into the left audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the right channel position replicated
     * into the left channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_RR = 3;
362 
    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    /** @hide */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}

    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
     * If the device supports it, this mode
     * enables a lower latency path through to the audio output sink.
     * Effects may no longer work with such an {@code AudioTrack} and
     * the sample rate must match that of the output sink.
     * <p>
     * Applications should be aware that low latency requires careful
     * buffer management, with smaller chunks of audio data written by each
     * {@code write()} call.
     * <p>
     * If this flag is used without specifying a {@code bufferSizeInBytes} then the
     * {@code AudioTrack}'s actual buffer size may be too small.
     * It is recommended that a fairly
     * large buffer should be specified when the {@code AudioTrack} is created.
     * Then the actual size can be reduced by calling
     * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
     * by lowering it after each {@code write()} call until the audio glitches,
     * which is detected by calling
     * {@link #getUnderrunCount()}. Then the buffer size can be increased
     * until there are no glitches.
     * This tuning step should be done while playing silence.
     * This technique provides a compromise between latency and glitch rate.
     */
    public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;

    /**
     * Power saving performance mode for an {@link AudioTrack}.
     * If the device supports it, this
     * mode will enable a lower power path to the audio output sink.
     * In addition, this lower power path typically will have
     * deeper internal buffers and better underrun resistance,
     * with a tradeoff of higher latency.
     * <p>
     * In this mode, applications should attempt to use a larger buffer size
     * and deliver larger chunks of audio data per {@code write()} call.
     * Use {@link #getBufferSizeInFrames()} to determine
     * the actual buffer size of the {@code AudioTrack} as it may have increased
     * to accommodate a deeper buffer.
     */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // keep in sync with system/media/audio/include/system/audio-base.h
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;

    // Size of HW_AV_SYNC track AV header.
    private static final float HEADER_V2_SIZE_BYTES = 20.0f;
452 
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;

    /**
     * Indicates that we are expecting an end of stream callback following a call
     * to setOffloadEndOfStream() in a gapless track transition context. The native track
     * will be restarted automatically.
     */
    private boolean mOffloadEosPending = false;

    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    @UnsupportedAppUsage
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * The AudioAttributes used in configuration.
     */
    private AudioAttributes mConfiguredAudioAttributes;
    /**
     * Audio session ID; initialized to {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;
    /**
     * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
     */
    private int mOffset = 0;
    /**
     * Indicates whether the track is intended to play in offload mode.
     */
    private boolean mOffloaded = false;
    /**
     * When offloaded track: delay for decoder in frames
     */
    private int mOffloadDelayFrames = 0;
    /**
     * When offloaded track: padding for decoder in frames
     */
    private int mOffloadPaddingFrames = 0;

    /**
     * The log session id used for metrics.
     * {@link LogSessionId#LOG_SESSION_ID_NONE} here means it is not set.
     */
    @NonNull private LogSessionId mLogSessionId = LogSessionId.LOG_SESSION_ID_NONE;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
    private long mJniData;
595 
596     //--------------------------------------------------------------------------
597     // Constructor, Finalize
598     //--------------------
599     /**
600      * Class constructor.
601      * @param streamType the type of the audio stream. See
602      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
603      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
604      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
605      * @param sampleRateInHz the initial source sample rate expressed in Hz.
606      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
607      *   which is usually the sample rate of the sink.
608      *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
609      * @param channelConfig describes the configuration of the audio channels.
610      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
611      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
612      * @param audioFormat the format in which the audio data is represented.
613      *   See {@link AudioFormat#ENCODING_PCM_16BIT},
614      *   {@link AudioFormat#ENCODING_PCM_8BIT},
615      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
616      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
617      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
618      *   <p> If the track's creation mode is {@link #MODE_STATIC},
619      *   this is the maximum length sample, or audio clip, that can be played by this instance.
620      *   <p> If the track's creation mode is {@link #MODE_STREAM},
621      *   this should be the desired buffer size
622      *   for the <code>AudioTrack</code> to satisfy the application's
623      *   latency requirements.
624      *   If <code>bufferSizeInBytes</code> is less than the
625      *   minimum buffer size for the output sink, it is increased to the minimum
626      *   buffer size.
627      *   The method {@link #getBufferSizeInFrames()} returns the
628      *   actual size in frames of the buffer created, which
629      *   determines the minimum frequency to write
630      *   to the streaming <code>AudioTrack</code> to avoid underrun.
631      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
632      *   for an AudioTrack instance in streaming mode.
633      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
634      * @throws java.lang.IllegalArgumentException
635      * @deprecated use {@link Builder} or
636      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
637      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
638      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode)639     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
640             int bufferSizeInBytes, int mode)
641     throws IllegalArgumentException {
642         this(streamType, sampleRateInHz, channelConfig, audioFormat,
643                 bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
644     }
645 
646     /**
647      * Class constructor with audio session. Use this constructor when the AudioTrack must be
648      * attached to a particular audio session. The primary use of the audio session ID is to
649      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
650      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
651      * and media players in the same session and not to the output mix.
652      * When an AudioTrack is created without specifying a session, it will create its own session
653      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
654      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
655      * session
656      * with all other media players or audio tracks in the same session, otherwise a new session
657      * will be created for this track if none is supplied.
658      * @param streamType the type of the audio stream. See
659      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
660      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
661      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
662      * @param sampleRateInHz the initial source sample rate expressed in Hz.
663      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
664      *   which is usually the sample rate of the sink.
665      * @param channelConfig describes the configuration of the audio channels.
666      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
667      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
668      * @param audioFormat the format in which the audio data is represented.
669      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
670      *   {@link AudioFormat#ENCODING_PCM_8BIT},
671      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
672      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
673      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
674      *   <p> If the track's creation mode is {@link #MODE_STATIC},
675      *   this is the maximum length sample, or audio clip, that can be played by this instance.
676      *   <p> If the track's creation mode is {@link #MODE_STREAM},
677      *   this should be the desired buffer size
678      *   for the <code>AudioTrack</code> to satisfy the application's
679      *   latency requirements.
680      *   If <code>bufferSizeInBytes</code> is less than the
681      *   minimum buffer size for the output sink, it is increased to the minimum
682      *   buffer size.
683      *   The method {@link #getBufferSizeInFrames()} returns the
684      *   actual size in frames of the buffer created, which
685      *   determines the minimum frequency to write
686      *   to the streaming <code>AudioTrack</code> to avoid underrun.
687      *   You can write data into this buffer in smaller chunks than this size.
688      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
689      *   for an AudioTrack instance in streaming mode.
690      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
691      * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException if any of the parameters is invalid.
693      * @deprecated use {@link Builder} or
694      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
695      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
696      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode, int sessionId)697     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
698             int bufferSizeInBytes, int mode, int sessionId)
699     throws IllegalArgumentException {
700         // mState already == STATE_UNINITIALIZED
701         this((new AudioAttributes.Builder())
702                     .setLegacyStreamType(streamType)
703                     .build(),
704                 (new AudioFormat.Builder())
705                     .setChannelMask(channelConfig)
706                     .setEncoding(audioFormat)
707                     .setSampleRate(sampleRateInHz)
708                     .build(),
709                 bufferSizeInBytes,
710                 mode, sessionId);
711         deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
712     }
713 
714     /**
715      * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
716      * @param attributes a non-null {@link AudioAttributes} instance.
717      * @param format a non-null {@link AudioFormat} instance describing the format of the data
718      *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
719      *     configuring the audio format parameters such as encoding, channel mask and sample rate.
720      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
721      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
722      *   <p> If the track's creation mode is {@link #MODE_STATIC},
723      *   this is the maximum length sample, or audio clip, that can be played by this instance.
724      *   <p> If the track's creation mode is {@link #MODE_STREAM},
725      *   this should be the desired buffer size
726      *   for the <code>AudioTrack</code> to satisfy the application's
727      *   latency requirements.
728      *   If <code>bufferSizeInBytes</code> is less than the
729      *   minimum buffer size for the output sink, it is increased to the minimum
730      *   buffer size.
731      *   The method {@link #getBufferSizeInFrames()} returns the
732      *   actual size in frames of the buffer created, which
733      *   determines the minimum frequency to write
734      *   to the streaming <code>AudioTrack</code> to avoid underrun.
735      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
736      *   for an AudioTrack instance in streaming mode.
737      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
738      * @param sessionId ID of audio session the AudioTrack must be attached to, or
739      *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
740      *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
741      *   construction.
742      * @throws IllegalArgumentException
743      */
AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId)744     public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
745             int mode, int sessionId)
746                     throws IllegalArgumentException {
747         this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/,
748                 ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
749     }
750 
    /**
     * Fully-parameterized constructor used by the public constructors and
     * {@link Builder#build()}. Validates the parameters, derives the native
     * configuration from the {@link AudioFormat} property mask, then creates and
     * initializes the native AudioTrack. On native failure it returns with
     * {@code mState == STATE_UNINITIALIZED}.
     *
     * @param attributes attributes describing the played content (non-null, immutable).
     * @param format non-null format of the audio data.
     * @param bufferSizeInBytes total size (in bytes) of the track buffer.
     * @param mode {@link #MODE_STREAM} or {@link #MODE_STATIC}.
     * @param sessionId audio session to attach to; must be >= 0.
     * @param offload true when the offloaded playback path was requested.
     * @param encapsulationMode one of the {@code ENCAPSULATION_MODE_*} constants.
     * @param tunerConfiguration optional TV-tuner parameters, or null.
     * @throws IllegalArgumentException if any parameter fails validation.
     */
    private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId, boolean offload, int encapsulationMode,
            @Nullable TunerConfiguration tunerConfiguration)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // mState already == STATE_UNINITIALIZED

        mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // Check if we should enable deep buffer mode
        if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
            mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // SAMPLE_RATE_UNSPECIFIED is passed down to native as 0 (the sink rate is used).
        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        // Only pick up the properties the caller actually set on the AudioFormat.
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates the parameters and records them into the corresponding m* fields.
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mOffloaded = offload;
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // sampleRate and session are in/out parameters of native_setup: native may
        // replace them with the values it actually selected.
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
                offload, encapsulationMode, tunerConfiguration,
                getCurrentOpPackageName());
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        // Adopt the values chosen by the native layer.
        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.

        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
            int frameSizeInBytes;
            if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
            } else {
                frameSizeInBytes = 1;
            }
            // Start writes past the AV sync header, rounded up to a whole frame.
            // NOTE(review): Math.ceil only has an effect if HEADER_V2_SIZE_BYTES is a
            // floating-point constant; if it were an int, the division would truncate
            // first — confirm at its declaration (not visible in this chunk).
            mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
        }

        if (mDataLoadMode == MODE_STATIC) {
            // Static tracks must receive their data via write() before playback can start.
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }

        baseRegisterPlayer(mSessionId);
        native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack.
    }
851 
852     /**
853      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
854      * the AudioTrackRoutingProxy subclass.
855      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
856      * (associated with an OpenSL ES player).
857      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
858      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
859      * it means that the OpenSL player interface hasn't been realized, so there is no native
860      * Audiotrack to connect to. In this case wait to call deferred_connect() until the
861      * OpenSLES interface is realized.
862      */
AudioTrack(long nativeTrackInJavaObj)863     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
864         super(new AudioAttributes.Builder().build(),
865                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
866         // "final"s
867         mNativeTrackInJavaObj = 0;
868         mJniData = 0;
869 
870         // remember which looper is associated with the AudioTrack instantiation
871         Looper looper;
872         if ((looper = Looper.myLooper()) == null) {
873             looper = Looper.getMainLooper();
874         }
875         mInitializationLooper = looper;
876 
877         // other initialization...
878         if (nativeTrackInJavaObj != 0) {
879             baseRegisterPlayer(AudioSystem.AUDIO_SESSION_ALLOCATE);
880             deferred_connect(nativeTrackInJavaObj);
881         } else {
882             mState = STATE_UNINITIALIZED;
883         }
884     }
885 
886     /**
887      * @hide
888      */
889     @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
deferred_connect(long nativeTrackInJavaObj)890     /* package */ void deferred_connect(long nativeTrackInJavaObj) {
891         if (mState != STATE_INITIALIZED) {
892             // Note that for this native_setup, we are providing an already created/initialized
893             // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
894             int[] session = { 0 };
895             int[] rates = { 0 };
896             int initResult = native_setup(new WeakReference<AudioTrack>(this),
897                     null /*mAttributes - NA*/,
898                     rates /*sampleRate - NA*/,
899                     0 /*mChannelMask - NA*/,
900                     0 /*mChannelIndexMask - NA*/,
901                     0 /*mAudioFormat - NA*/,
902                     0 /*mNativeBufferSizeInBytes - NA*/,
903                     0 /*mDataLoadMode - NA*/,
904                     session,
905                     nativeTrackInJavaObj,
906                     false /*offload*/,
907                     ENCAPSULATION_MODE_NONE,
908                     null /* tunerConfiguration */,
909                     "" /* opPackagename */);
910             if (initResult != SUCCESS) {
911                 loge("Error code "+initResult+" when initializing AudioTrack.");
912                 return; // with mState == STATE_UNINITIALIZED
913             }
914 
915             mSessionId = session[0];
916 
917             mState = STATE_INITIALIZED;
918         }
919     }
920 
921     /**
922      * TunerConfiguration is used to convey tuner information
923      * from the android.media.tv.Tuner API to AudioTrack construction.
924      *
925      * Use the Builder to construct the TunerConfiguration object,
926      * which is then used by the {@link AudioTrack.Builder} to create an AudioTrack.
927      * @hide
928      */
929     @SystemApi
930     public static class TunerConfiguration {
931         private final int mContentId;
932         private final int mSyncId;
933 
934         /**
935          * A special content id for {@link #TunerConfiguration(int, int)}
936          * indicating audio is delivered
937          * from an {@code AudioTrack} write, not tunneled from the tuner stack.
938          */
939         public static final int CONTENT_ID_NONE = 0;
940 
941         /**
942          * Constructs a TunerConfiguration instance for use in {@link AudioTrack.Builder}
943          *
944          * @param contentId selects the audio stream to use.
945          *     The contentId may be obtained from
946          *     {@link android.media.tv.tuner.filter.Filter#getId()},
947          *     such obtained id is always a positive number.
948          *     If audio is to be delivered through an {@code AudioTrack} write
949          *     then {@code CONTENT_ID_NONE} may be used.
950          * @param syncId selects the clock to use for synchronization
951          *     of audio with other streams such as video.
952          *     The syncId may be obtained from
953          *     {@link android.media.tv.tuner.Tuner#getAvSyncHwId()}.
954          *     This is always a positive number.
955          */
956         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
TunerConfiguration( @ntRangefrom = 0) int contentId, @IntRange(from = 1)int syncId)957         public TunerConfiguration(
958                 @IntRange(from = 0) int contentId, @IntRange(from = 1)int syncId) {
959             if (contentId < 0) {
960                 throw new IllegalArgumentException(
961                         "contentId " + contentId + " must be positive or CONTENT_ID_NONE");
962             }
963             if (syncId < 1) {
964                 throw new IllegalArgumentException("syncId " + syncId + " must be positive");
965             }
966             mContentId = contentId;
967             mSyncId = syncId;
968         }
969 
970         /**
971          * Returns the contentId.
972          */
973         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getContentId()974         public @IntRange(from = 1) int getContentId() {
975             return mContentId; // The Builder ensures this is > 0.
976         }
977 
978         /**
979          * Returns the syncId.
980          */
981         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getSyncId()982         public @IntRange(from = 1) int getSyncId() {
983             return mSyncId;  // The Builder ensures this is > 0.
984         }
985     }
986 
987     /**
988      * Builder class for {@link AudioTrack} objects.
989      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
990      * attributes and audio format parameters, you indicate which of those vary from the default
991      * behavior on the device.
992      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
993      * parameters, to be used by a new <code>AudioTrack</code> instance:
994      *
995      * <pre class="prettyprint">
996      * AudioTrack player = new AudioTrack.Builder()
997      *         .setAudioAttributes(new AudioAttributes.Builder()
998      *                  .setUsage(AudioAttributes.USAGE_ALARM)
999      *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
1000      *                  .build())
1001      *         .setAudioFormat(new AudioFormat.Builder()
1002      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
1003      *                 .setSampleRate(44100)
1004      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
1005      *                 .build())
1006      *         .setBufferSizeInBytes(minBuffSize)
1007      *         .build();
1008      * </pre>
1009      * <p>
1010      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
1011      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
1012      * <br>If the audio format is not specified or is incomplete, its channel configuration will be
1013      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
1014      * {@link AudioFormat#ENCODING_PCM_16BIT}.
1015      * The sample rate will depend on the device actually selected for playback and can be queried
1016      * with {@link #getSampleRate()} method.
1017      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
1018      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
1019      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
1020      * <code>MODE_STREAM</code> will be used.
1021      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
1022      * be generated.
1023      * <br>Offload is false by default.
1024      */
1025     public static class Builder {
1026         private AudioAttributes mAttributes;
1027         private AudioFormat mFormat;
1028         private int mBufferSizeInBytes;
1029         private int mEncapsulationMode = ENCAPSULATION_MODE_NONE;
1030         private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
1031         private int mMode = MODE_STREAM;
1032         private int mPerformanceMode = PERFORMANCE_MODE_NONE;
1033         private boolean mOffload = false;
1034         private TunerConfiguration mTunerConfiguration;
1035 
1036         /**
1037          * Constructs a new Builder with the default values as described above.
1038          */
Builder()1039         public Builder() {
1040         }
1041 
1042         /**
1043          * Sets the {@link AudioAttributes}.
1044          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
1045          *     data to be played.
1046          * @return the same Builder instance.
1047          * @throws IllegalArgumentException
1048          */
setAudioAttributes(@onNull AudioAttributes attributes)1049         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
1050                 throws IllegalArgumentException {
1051             if (attributes == null) {
1052                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1053             }
1054             // keep reference, we only copy the data when building
1055             mAttributes = attributes;
1056             return this;
1057         }
1058 
1059         /**
1060          * Sets the format of the audio data to be played by the {@link AudioTrack}.
1061          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
1062          * as encoding, channel mask and sample rate.
1063          * @param format a non-null {@link AudioFormat} instance.
1064          * @return the same Builder instance.
1065          * @throws IllegalArgumentException
1066          */
setAudioFormat(@onNull AudioFormat format)1067         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
1068                 throws IllegalArgumentException {
1069             if (format == null) {
1070                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
1071             }
1072             // keep reference, we only copy the data when building
1073             mFormat = format;
1074             return this;
1075         }
1076 
1077         /**
1078          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
1079          * If using the {@link AudioTrack} in streaming mode
1080          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
1081          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
1082          * the estimated minimum buffer size for the creation of an AudioTrack instance
1083          * in streaming mode.
1084          * <br>If using the <code>AudioTrack</code> in static mode (see
1085          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
1086          * played by this instance.
1087          * @param bufferSizeInBytes
1088          * @return the same Builder instance.
1089          * @throws IllegalArgumentException
1090          */
setBufferSizeInBytes(@ntRangefrom = 0) int bufferSizeInBytes)1091         public @NonNull Builder setBufferSizeInBytes(@IntRange(from = 0) int bufferSizeInBytes)
1092                 throws IllegalArgumentException {
1093             if (bufferSizeInBytes <= 0) {
1094                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
1095             }
1096             mBufferSizeInBytes = bufferSizeInBytes;
1097             return this;
1098         }
1099 
1100         /**
1101          * Sets the encapsulation mode.
1102          *
1103          * Encapsulation mode allows metadata to be sent together with
1104          * the audio data payload in a {@code ByteBuffer}.
1105          * This requires a compatible hardware audio codec.
1106          *
1107          * @param encapsulationMode one of {@link AudioTrack#ENCAPSULATION_MODE_NONE},
1108          *        or {@link AudioTrack#ENCAPSULATION_MODE_ELEMENTARY_STREAM}.
1109          * @return the same Builder instance.
1110          */
1111         // Note: with the correct permission {@code AudioTrack#ENCAPSULATION_MODE_HANDLE}
1112         // may be used as well.
setEncapsulationMode(@ncapsulationMode int encapsulationMode)1113         public @NonNull Builder setEncapsulationMode(@EncapsulationMode int encapsulationMode) {
1114             switch (encapsulationMode) {
1115                 case ENCAPSULATION_MODE_NONE:
1116                 case ENCAPSULATION_MODE_ELEMENTARY_STREAM:
1117                 case ENCAPSULATION_MODE_HANDLE:
1118                     mEncapsulationMode = encapsulationMode;
1119                     break;
1120                 default:
1121                     throw new IllegalArgumentException(
1122                             "Invalid encapsulation mode " + encapsulationMode);
1123             }
1124             return this;
1125         }
1126 
1127         /**
1128          * Sets the mode under which buffers of audio data are transferred from the
1129          * {@link AudioTrack} to the framework.
1130          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
1131          * @return the same Builder instance.
1132          * @throws IllegalArgumentException
1133          */
setTransferMode(@ransferMode int mode)1134         public @NonNull Builder setTransferMode(@TransferMode int mode)
1135                 throws IllegalArgumentException {
1136             switch(mode) {
1137                 case MODE_STREAM:
1138                 case MODE_STATIC:
1139                     mMode = mode;
1140                     break;
1141                 default:
1142                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
1143             }
1144             return this;
1145         }
1146 
1147         /**
1148          * Sets the session ID the {@link AudioTrack} will be attached to.
1149          * @param sessionId a strictly positive ID number retrieved from another
1150          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
1151          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
1152          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
1153          * @return the same Builder instance.
1154          * @throws IllegalArgumentException
1155          */
setSessionId(@ntRangefrom = 1) int sessionId)1156         public @NonNull Builder setSessionId(@IntRange(from = 1) int sessionId)
1157                 throws IllegalArgumentException {
1158             if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
1159                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
1160             }
1161             mSessionId = sessionId;
1162             return this;
1163         }
1164 
1165         /**
1166          * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
1167          * may not be supported by the particular device, and the framework is free
1168          * to ignore such request if it is incompatible with other requests or hardware.
1169          *
1170          * @param performanceMode one of
1171          * {@link AudioTrack#PERFORMANCE_MODE_NONE},
1172          * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
1173          * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
1174          * @return the same Builder instance.
1175          * @throws IllegalArgumentException if {@code performanceMode} is not valid.
1176          */
setPerformanceMode(@erformanceMode int performanceMode)1177         public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
1178             switch (performanceMode) {
1179                 case PERFORMANCE_MODE_NONE:
1180                 case PERFORMANCE_MODE_LOW_LATENCY:
1181                 case PERFORMANCE_MODE_POWER_SAVING:
1182                     mPerformanceMode = performanceMode;
1183                     break;
1184                 default:
1185                     throw new IllegalArgumentException(
1186                             "Invalid performance mode " + performanceMode);
1187             }
1188             return this;
1189         }
1190 
1191         /**
1192          * Sets whether this track will play through the offloaded audio path.
1193          * When set to true, at build time, the audio format will be checked against
1194          * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)}
1195          * to verify the audio format used by this track is supported on the device's offload
1196          * path (if any).
1197          * <br>Offload is only supported for media audio streams, and therefore requires that
1198          * the usage be {@link AudioAttributes#USAGE_MEDIA}.
1199          * @param offload true to require the offload path for playback.
1200          * @return the same Builder instance.
1201          */
setOffloadedPlayback(boolean offload)1202         public @NonNull Builder setOffloadedPlayback(boolean offload) {
1203             mOffload = offload;
1204             return this;
1205         }
1206 
1207         /**
1208          * Sets the tuner configuration for the {@code AudioTrack}.
1209          *
1210          * The {@link AudioTrack.TunerConfiguration} consists of parameters obtained from
1211          * the Android TV tuner API which indicate the audio content stream id and the
1212          * synchronization id for the {@code AudioTrack}.
1213          *
1214          * @param tunerConfiguration obtained by {@link AudioTrack.TunerConfiguration.Builder}.
1215          * @return the same Builder instance.
1216          * @hide
1217          */
1218         @SystemApi
1219         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
setTunerConfiguration( @onNull TunerConfiguration tunerConfiguration)1220         public @NonNull Builder setTunerConfiguration(
1221                 @NonNull TunerConfiguration tunerConfiguration) {
1222             if (tunerConfiguration == null) {
1223                 throw new IllegalArgumentException("tunerConfiguration is null");
1224             }
1225             mTunerConfiguration = tunerConfiguration;
1226             return this;
1227         }
1228 
1229         /**
1230          * Builds an {@link AudioTrack} instance initialized with all the parameters set
1231          * on this <code>Builder</code>.
1232          * @return a new successfully initialized {@link AudioTrack} instance.
1233          * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
1234          *     were incompatible, or if they are not supported by the device,
1235          *     or if the device was not available.
1236          */
build()1237         public @NonNull AudioTrack build() throws UnsupportedOperationException {
1238             if (mAttributes == null) {
1239                 mAttributes = new AudioAttributes.Builder()
1240                         .setUsage(AudioAttributes.USAGE_MEDIA)
1241                         .build();
1242             }
1243             switch (mPerformanceMode) {
1244             case PERFORMANCE_MODE_LOW_LATENCY:
1245                 mAttributes = new AudioAttributes.Builder(mAttributes)
1246                     .replaceFlags((mAttributes.getAllFlags()
1247                             | AudioAttributes.FLAG_LOW_LATENCY)
1248                             & ~AudioAttributes.FLAG_DEEP_BUFFER)
1249                     .build();
1250                 break;
1251             case PERFORMANCE_MODE_NONE:
1252                 if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
1253                     break; // do not enable deep buffer mode.
1254                 }
1255                 // permitted to fall through to enable deep buffer
1256             case PERFORMANCE_MODE_POWER_SAVING:
1257                 mAttributes = new AudioAttributes.Builder(mAttributes)
1258                 .replaceFlags((mAttributes.getAllFlags()
1259                         | AudioAttributes.FLAG_DEEP_BUFFER)
1260                         & ~AudioAttributes.FLAG_LOW_LATENCY)
1261                 .build();
1262                 break;
1263             }
1264 
1265             if (mFormat == null) {
1266                 mFormat = new AudioFormat.Builder()
1267                         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
1268                         //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
1269                         .setEncoding(AudioFormat.ENCODING_DEFAULT)
1270                         .build();
1271             }
1272 
1273             if (mOffload) {
1274                 if (mPerformanceMode == PERFORMANCE_MODE_LOW_LATENCY) {
1275                     throw new UnsupportedOperationException(
1276                             "Offload and low latency modes are incompatible");
1277                 }
1278                 if (AudioSystem.getOffloadSupport(mFormat, mAttributes)
1279                         == AudioSystem.OFFLOAD_NOT_SUPPORTED) {
1280                     throw new UnsupportedOperationException(
1281                             "Cannot create AudioTrack, offload format / attributes not supported");
1282                 }
1283             }
1284 
1285             // TODO: Check mEncapsulationMode compatibility with MODE_STATIC, etc?
1286 
1287             // If the buffer size is not specified in streaming mode,
1288             // use a single frame for the buffer size and let the
1289             // native code figure out the minimum buffer size.
1290             if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
1291                 int bytesPerSample = 1;
1292                 if (AudioFormat.isEncodingLinearFrames(mFormat.getEncoding())) {
1293                     try {
1294                         bytesPerSample = mFormat.getBytesPerSample(mFormat.getEncoding());
1295                     } catch (IllegalArgumentException e) {
1296                         // do nothing
1297                     }
1298                 }
1299                 mBufferSizeInBytes = mFormat.getChannelCount() * bytesPerSample;
1300             }
1301 
1302             try {
1303                 final AudioTrack track = new AudioTrack(
1304                         mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId, mOffload,
1305                         mEncapsulationMode, mTunerConfiguration);
1306                 if (track.getState() == STATE_UNINITIALIZED) {
1307                     // release is not necessary
1308                     throw new UnsupportedOperationException("Cannot create AudioTrack");
1309                 }
1310                 return track;
1311             } catch (IllegalArgumentException e) {
1312                 throw new UnsupportedOperationException(e.getMessage());
1313             }
1314         }
1315     }
1316 
1317     /**
1318      * Configures the delay and padding values for the current compressed stream playing
1319      * in offload mode.
1320      * This can only be used on a track successfully initialized with
1321      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. The unit is frames, where a
1322      * frame indicates the number of samples per channel, e.g. 100 frames for a stereo compressed
1323      * stream corresponds to 200 decoded interleaved PCM samples.
1324      * @param delayInFrames number of frames to be ignored at the beginning of the stream. A value
1325      *     of 0 indicates no delay is to be applied.
     * @param paddingInFrames number of frames to be ignored at the end of the stream. A value
     *     of 0 indicates no padding is to be applied.
1328      */
    public void setOffloadDelayPadding(@IntRange(from = 0) int delayInFrames,
            @IntRange(from = 0) int paddingInFrames) {
        // Argument checks first (padding, then delay), then state checks; the
        // order determines which exception the caller sees.
        if (paddingInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative padding");
        }
        if (delayInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative delay");
        }
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal use of delay/padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        // Cache the values so getOffloadDelay()/getOffloadPadding() can return
        // them without a native round-trip, then push them to the native track.
        mOffloadDelayFrames = delayInFrames;
        mOffloadPaddingFrames = paddingInFrames;
        native_set_delay_padding(delayInFrames, paddingInFrames);
    }
1347 
1348     /**
1349      * Return the decoder delay of an offloaded track, expressed in frames, previously set with
1350      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1351      * <p>This delay indicates the number of frames to be ignored at the beginning of the stream.
1352      * This value can only be queried on a track successfully initialized with
1353      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1354      * @return decoder delay expressed in frames.
1355      */
    public @IntRange(from = 0) int getOffloadDelay() {
        // Delay may only be queried on an initialized, offloaded track.
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal query of delay on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Illegal query of delay on uninitialized track");
        }
        // Returns the value cached by setOffloadDelayPadding() (0 if never set).
        return mOffloadDelayFrames;
    }
1365 
1366     /**
1367      * Return the decoder padding of an offloaded track, expressed in frames, previously set with
1368      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1369      * <p>This padding indicates the number of frames to be ignored at the end of the stream.
1370      * This value can only be queried on a track successfully initialized with
1371      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1372      * @return decoder padding expressed in frames.
1373      */
    public @IntRange(from = 0) int getOffloadPadding() {
        // Padding may only be queried on an initialized, offloaded track.
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal query of padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Illegal query of padding on uninitialized track");
        }
        // Returns the value cached by setOffloadDelayPadding() (0 if never set).
        return mOffloadPaddingFrames;
    }
1383 
1384     /**
1385      * Declares that the last write() operation on this track provided the last buffer of this
1386      * stream.
1387      * After the end of stream, previously set padding and delay values are ignored.
     * Can only be called if the AudioTrack is opened in offload mode
1389      * {@see Builder#setOffloadedPlayback(boolean)}.
     * Can only be called if the AudioTrack is in state {@link #PLAYSTATE_PLAYING}
1391      * {@see #getPlayState()}.
1392      * Use this method in the same thread as any write() operation.
1393      */
    public void setOffloadEndOfStream() {
        // Preconditions: offloaded, initialized, currently playing, and at least
        // one StreamEventCallback registered to observe stream completion.
        if (!mOffloaded) {
            throw new IllegalStateException("EOS not supported on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        if (mPlayState != PLAYSTATE_PLAYING) {
            throw new IllegalStateException("EOS not supported if not playing");
        }
        synchronized (mStreamEventCbLock) {
            if (mStreamEventCbInfoList.size() == 0) {
                throw new IllegalStateException("EOS not supported without StreamEventCallback");
            }
        }

        synchronized (mPlayStateLock) {
            // Stop the native track and record the pending EOS; the play state
            // remains PLAYSTATE_STOPPING while the offloaded stream drains.
            native_stop();
            mOffloadEosPending = true;
            mPlayState = PLAYSTATE_STOPPING;
        }
    }
1416 
1417     /**
1418      * Returns whether the track was built with {@link Builder#setOffloadedPlayback(boolean)} set
1419      * to {@code true}.
1420      * @return true if the track is using offloaded playback.
1421      */
    public boolean isOffloadedPlayback() {
        // Reflects the Builder#setOffloadedPlayback(boolean) configuration.
        return mOffloaded;
    }
1425 
1426     /**
1427      * Returns whether direct playback of an audio format with the provided attributes is
1428      * currently supported on the system.
1429      * <p>Direct playback means that the audio stream is not resampled or downmixed
1430      * by the framework. Checking for direct support can help the app select the representation
     * of audio content that most closely matches the capabilities of the device and peripherals
1432      * (e.g. A/V receiver) connected to it. Note that the provided stream can still be re-encoded
1433      * or mixed with other streams, if needed.
1434      * <p>Also note that this query only provides information about the support of an audio format.
1435      * It does not indicate whether the resources necessary for the playback are available
1436      * at that instant.
1437      * @param format a non-null {@link AudioFormat} instance describing the format of
1438      *   the audio data.
1439      * @param attributes a non-null {@link AudioAttributes} instance.
1440      * @return true if the given audio format can be played directly.
1441      */
isDirectPlaybackSupported(@onNull AudioFormat format, @NonNull AudioAttributes attributes)1442     public static boolean isDirectPlaybackSupported(@NonNull AudioFormat format,
1443             @NonNull AudioAttributes attributes) {
1444         if (format == null) {
1445             throw new IllegalArgumentException("Illegal null AudioFormat argument");
1446         }
1447         if (attributes == null) {
1448             throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1449         }
1450         return native_is_direct_output_supported(format.getEncoding(), format.getSampleRate(),
1451                 format.getChannelMask(), format.getChannelIndexMask(),
1452                 attributes.getContentType(), attributes.getUsage(), attributes.getFlags());
1453     }
1454 
1455     /*
1456      * The MAX_LEVEL should be exactly representable by an IEEE 754-2008 base32 float.
1457      * This means fractions must be divisible by a power of 2. For example,
1458      * 10.25f is OK as 0.25 is 1/4, but 10.1f is NOT OK as 1/10 is not expressable by
1459      * a finite binary fraction.
1460      *
1461      * 48.f is the nominal max for API level {@link android os.Build.VERSION_CODES#R}.
1462      * We use this to suggest a baseline range for implementation.
1463      *
1464      * The API contract specification allows increasing this value in a future
1465      * API release, but not decreasing this value.
1466      */
1467     private static final float MAX_AUDIO_DESCRIPTION_MIX_LEVEL = 48.f;
1468 
isValidAudioDescriptionMixLevel(float level)1469     private static boolean isValidAudioDescriptionMixLevel(float level) {
1470         return !(Float.isNaN(level) || level > MAX_AUDIO_DESCRIPTION_MIX_LEVEL);
1471     }
1472 
1473     /**
1474      * Sets the Audio Description mix level in dB.
1475      *
1476      * For AudioTracks incorporating a secondary Audio Description stream
     * (where such contents may be sent through an Encapsulation Mode
     * other than {@link #ENCAPSULATION_MODE_NONE},
     * or internally by a HW channel),
1480      * the level of mixing of the Audio Description to the Main Audio stream
1481      * is controlled by this method.
1482      *
1483      * Such mixing occurs <strong>prior</strong> to overall volume scaling.
1484      *
1485      * @param level a floating point value between
1486      *     {@code Float.NEGATIVE_INFINITY} to {@code +48.f},
1487      *     where {@code Float.NEGATIVE_INFINITY} means the Audio Description is not mixed
1488      *     and a level of {@code 0.f} means the Audio Description is mixed without scaling.
1489      * @return true on success, false on failure.
1490      */
setAudioDescriptionMixLeveldB( @loatRangeto = 48.f, toInclusive = true) float level)1491     public boolean setAudioDescriptionMixLeveldB(
1492             @FloatRange(to = 48.f, toInclusive = true) float level) {
1493         if (!isValidAudioDescriptionMixLevel(level)) {
1494             throw new IllegalArgumentException("level is out of range" + level);
1495         }
1496         return native_set_audio_description_mix_level_db(level) == SUCCESS;
1497     }
1498 
1499     /**
1500      * Returns the Audio Description mix level in dB.
1501      *
1502      * If Audio Description mixing is unavailable from the hardware device,
1503      * a value of {@code Float.NEGATIVE_INFINITY} is returned.
1504      *
1505      * @return the current Audio Description Mix Level in dB.
1506      *     A value of {@code Float.NEGATIVE_INFINITY} means
1507      *     that the audio description is not mixed or
1508      *     the hardware is not available.
1509      *     This should reflect the <strong>true</strong> internal device mix level;
1510      *     hence the application might receive any floating value
1511      *     except {@code Float.NaN}.
1512      */
getAudioDescriptionMixLeveldB()1513     public float getAudioDescriptionMixLeveldB() {
1514         float[] level = { Float.NEGATIVE_INFINITY };
1515         try {
1516             final int status = native_get_audio_description_mix_level_db(level);
1517             if (status != SUCCESS || Float.isNaN(level[0])) {
1518                 return Float.NEGATIVE_INFINITY;
1519             }
1520         } catch (Exception e) {
1521             return Float.NEGATIVE_INFINITY;
1522         }
1523         return level[0];
1524     }
1525 
isValidDualMonoMode(@ualMonoMode int dualMonoMode)1526     private static boolean isValidDualMonoMode(@DualMonoMode int dualMonoMode) {
1527         switch (dualMonoMode) {
1528             case DUAL_MONO_MODE_OFF:
1529             case DUAL_MONO_MODE_LR:
1530             case DUAL_MONO_MODE_LL:
1531             case DUAL_MONO_MODE_RR:
1532                 return true;
1533             default:
1534                 return false;
1535         }
1536     }
1537 
1538     /**
1539      * Sets the Dual Mono mode presentation on the output device.
1540      *
1541      * The Dual Mono mode is generally applied to stereo audio streams
1542      * where the left and right channels come from separate sources.
1543      *
1544      * For compressed audio, where the decoding is done in hardware,
1545      * Dual Mono presentation needs to be performed
1546      * by the hardware output device
1547      * as the PCM audio is not available to the framework.
1548      *
1549      * @param dualMonoMode one of {@link #DUAL_MONO_MODE_OFF},
1550      *     {@link #DUAL_MONO_MODE_LR},
1551      *     {@link #DUAL_MONO_MODE_LL},
1552      *     {@link #DUAL_MONO_MODE_RR}.
1553      *
1554      * @return true on success, false on failure if the output device
1555      *     does not support Dual Mono mode.
1556      */
setDualMonoMode(@ualMonoMode int dualMonoMode)1557     public boolean setDualMonoMode(@DualMonoMode int dualMonoMode) {
1558         if (!isValidDualMonoMode(dualMonoMode)) {
1559             throw new IllegalArgumentException(
1560                     "Invalid Dual Mono mode " + dualMonoMode);
1561         }
1562         return native_set_dual_mono_mode(dualMonoMode) == SUCCESS;
1563     }
1564 
1565     /**
1566      * Returns the Dual Mono mode presentation setting.
1567      *
1568      * If no Dual Mono presentation is available for the output device,
1569      * then {@link #DUAL_MONO_MODE_OFF} is returned.
1570      *
1571      * @return one of {@link #DUAL_MONO_MODE_OFF},
1572      *     {@link #DUAL_MONO_MODE_LR},
1573      *     {@link #DUAL_MONO_MODE_LL},
1574      *     {@link #DUAL_MONO_MODE_RR}.
1575      */
getDualMonoMode()1576     public @DualMonoMode int getDualMonoMode() {
1577         int[] dualMonoMode = { DUAL_MONO_MODE_OFF };
1578         try {
1579             final int status = native_get_dual_mono_mode(dualMonoMode);
1580             if (status != SUCCESS || !isValidDualMonoMode(dualMonoMode[0])) {
1581                 return DUAL_MONO_MODE_OFF;
1582             }
1583         } catch (Exception e) {
1584             return DUAL_MONO_MODE_OFF;
1585         }
1586         return dualMonoMode[0];
1587     }
1588 
    // mask of all the positional channels supported, however the allowed combinations
    // are further restricted by the matching left/right rule enforced in
    // isMultichannelConfigSupported() and by AudioSystem.OUT_CHANNEL_COUNT_MAX
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY_2 |
            AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT;
1619 
1620     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
1621     // power saving to be automatically enabled for an AudioTrack. Returns false if
1622     // power saving is already enabled in the attributes parameter.
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)1623     private static boolean shouldEnablePowerSaving(
1624             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
1625             int bufferSizeInBytes, int mode) {
1626         // If no attributes, OK
1627         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
1628         // Only consider flags that are not compatible with FLAG_DEEP_BUFFER. We include
1629         // FLAG_DEEP_BUFFER because if set the request is explicit and
1630         // shouldEnablePowerSaving() should return false.
1631         final int flags = attributes.getAllFlags()
1632                 & (AudioAttributes.FLAG_DEEP_BUFFER | AudioAttributes.FLAG_LOW_LATENCY
1633                     | AudioAttributes.FLAG_HW_AV_SYNC | AudioAttributes.FLAG_BEACON);
1634 
1635         if (attributes != null &&
1636                 (flags != 0  // cannot have any special flags
1637                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
1638                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
1639                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
1640                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
1641             return false;
1642         }
1643 
1644         // Format must be fully specified and be linear pcm
1645         if (format == null
1646                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
1647                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
1648                 || !AudioFormat.isValidEncoding(format.getEncoding())
1649                 || format.getChannelCount() < 1) {
1650             return false;
1651         }
1652 
1653         // Mode must be streaming
1654         if (mode != MODE_STREAM) {
1655             return false;
1656         }
1657 
1658         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
1659         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
1660         if (bufferSizeInBytes != 0) {
1661             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
1662             final int MILLIS_PER_SECOND = 1000;
1663             final long bufferTargetSize =
1664                     BUFFER_TARGET_MODE_STREAM_MS
1665                     * format.getChannelCount()
1666                     * format.getBytesPerSample(format.getEncoding())
1667                     * format.getSampleRate()
1668                     / MILLIS_PER_SECOND;
1669             if (bufferSizeInBytes < bufferTargetSize) {
1670                 return false;
1671             }
1672         }
1673 
1674         return true;
1675     }
1676 
1677     // Convenience method for the constructor's parameter checks.
1678     // This is where constructor IllegalArgumentException-s are thrown
1679     // postconditions:
1680     //    mChannelCount is valid
1681     //    mChannelMask is valid
1682     //    mAudioFormat is valid
1683     //    mSampleRate is valid
1684     //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // SAMPLE_RATE_UNSPECIFIED is allowed through so the route can pick a default.
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 is expected over 2- or 8-channel layouts; anything else is only
        // warned about here, not rejected.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO
                && AudioFormat.channelCountFromOutChannelMask(channelConfig) != 8) {
            Log.w(TAG, "ENCODING_IEC61937 is configured with channel mask as " + channelConfig
                    + ", which is not 2 or 8 channels");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                // mChannelCount deferred to the index-mask branch below.
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
                throw new IllegalArgumentException(
                        "Unsupported channel mask configuration " + channelConfig
                        + " for encoding " + audioFormat);
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // As of S, we accept up to 24 channel index mask.
            final int fullIndexMask = (1 << AudioSystem.FCC_24) - 1;
            final int channelIndexCount = Integer.bitCount(channelIndexMask);
            final boolean accepted = (channelIndexMask & ~fullIndexMask) == 0
                    && (!AudioFormat.isEncodingLinearFrames(audioFormat)  // compressed OK
                            || channelIndexCount <= AudioSystem.OUT_CHANNEL_COUNT_MAX); // PCM
            if (!accepted) {
                throw new IllegalArgumentException(
                        "Unsupported channel index mask configuration " + channelIndexMask
                        + " for encoding " + audioFormat);
            }
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // Both a position mask and an index mask were given; they must agree.
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // MODE_STATIC additionally requires a linear PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
1773 
1774     // General pair map
1775     private static final HashMap<String, Integer> CHANNEL_PAIR_MAP = new HashMap<>() {{
1776         put("front", AudioFormat.CHANNEL_OUT_FRONT_LEFT
1777                 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT);
1778         put("back", AudioFormat.CHANNEL_OUT_BACK_LEFT
1779                 | AudioFormat.CHANNEL_OUT_BACK_RIGHT);
1780         put("front of center", AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER
1781                 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER);
1782         put("side", AudioFormat.CHANNEL_OUT_SIDE_LEFT
1783                 | AudioFormat.CHANNEL_OUT_SIDE_RIGHT);
1784         put("top front", AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT
1785                 | AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT);
1786         put("top back", AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT
1787                 | AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT);
1788         put("top side", AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT
1789                 | AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT);
1790         put("bottom front", AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT
1791                 | AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT);
1792         put("front wide", AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT
1793                 | AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT);
1794     }};
1795 
1796     /**
1797      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
1798      * @param channelConfig the mask to validate
1799      * @return false if the AudioTrack can't be used with such a mask
1800      */
isMultichannelConfigSupported(int channelConfig, int encoding)1801     private static boolean isMultichannelConfigSupported(int channelConfig, int encoding) {
1802         // check for unsupported channels
1803         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
1804             loge("Channel configuration features unsupported channels");
1805             return false;
1806         }
1807         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1808         final int channelCountLimit;
1809         try {
1810             channelCountLimit = AudioFormat.isEncodingLinearFrames(encoding)
1811                     ? AudioSystem.OUT_CHANNEL_COUNT_MAX  // PCM limited to OUT_CHANNEL_COUNT_MAX
1812                     : AudioSystem.FCC_24;                // Compressed limited to 24 channels
1813         } catch (IllegalArgumentException iae) {
1814             loge("Unsupported encoding " + iae);
1815             return false;
1816         }
1817         if (channelCount > channelCountLimit) {
1818             loge("Channel configuration contains too many channels for encoding "
1819                     + encoding + "(" + channelCount + " > " + channelCountLimit + ")");
1820             return false;
1821         }
1822         // check for unsupported multichannel combinations:
1823         // - FL/FR must be present
1824         // - L/R channels must be paired (e.g. no single L channel)
1825         final int frontPair =
1826                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
1827         if ((channelConfig & frontPair) != frontPair) {
1828                 loge("Front channels must be present in multichannel configurations");
1829                 return false;
1830         }
1831         // Check all pairs to see that they are matched (front duplicated here).
1832         for (HashMap.Entry<String, Integer> e : CHANNEL_PAIR_MAP.entrySet()) {
1833             final int positionPair = e.getValue();
1834             if ((channelConfig & positionPair) != 0
1835                     && (channelConfig & positionPair) != positionPair) {
1836                 loge("Channel pair (" + e.getKey() + ") cannot be used independently");
1837                 return false;
1838             }
1839         }
1840         return true;
1841     }
1842 
1843 
1844     // Convenience method for the constructor's audio buffer size check.
1845     // preconditions:
1846     //    mChannelCount is valid
1847     //    mAudioFormat is valid
1848     // postcondition:
1849     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)1850     private void audioBuffSizeCheck(int audioBufferSize) {
1851         // NB: this section is only valid with PCM or IEC61937 data.
1852         //     To update when supporting compressed formats
1853         int frameSizeInBytes;
1854         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
1855             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
1856         } else {
1857             frameSizeInBytes = 1;
1858         }
1859         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
1860             throw new IllegalArgumentException("Invalid audio buffer size.");
1861         }
1862 
1863         mNativeBufferSizeInBytes = audioBufferSize;
1864         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
1865     }
1866 
1867 
1868     /**
1869      * Releases the native AudioTrack resources.
1870      */
    public void release() {
        // Tear down stream event handling first so no further events are
        // dispatched while the native track is being released.
        synchronized (mStreamEventCbLock){
            endStreamEventHandling();
        }
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch(IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        baseRelease();
        native_release();
        synchronized (mPlayStateLock) {
            mState = STATE_UNINITIALIZED;
            mPlayState = PLAYSTATE_STOPPED;
            // wake any thread blocked waiting on a play state change
            mPlayStateLock.notify();
        }
    }
1890 
    @Override
    protected void finalize() {
        // Last-resort cleanup of native resources if release() was never called.
        // Must not throw: disable routing callbacks, release the base player
        // state, then free the native track.
        tryToDisableNativeRoutingCallback();
        baseRelease();
        native_finalize();
    }
1897 
1898     //--------------------------------------------------------------------------
1899     // Getters
1900     //--------------------
1901     /**
1902      * Returns the minimum gain value, which is the constant 0.0.
1903      * Gain values less than 0.0 will be clamped to 0.0.
1904      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1905      * @return the minimum value, which is the constant 0.0.
1906      */
getMinVolume()1907     static public float getMinVolume() {
1908         return GAIN_MIN;
1909     }
1910 
1911     /**
1912      * Returns the maximum gain value, which is greater than or equal to 1.0.
1913      * Gain values greater than the maximum will be clamped to the maximum.
1914      * <p>The word "volume" in the API name is historical; this is actually a gain.
1915      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
1916      * corresponds to a gain of 0 dB (sample values left unmodified).
1917      * @return the maximum value, which is greater than or equal to 1.0.
1918      */
getMaxVolume()1919     static public float getMaxVolume() {
1920         return GAIN_MAX;
1921     }
1922 
1923     /**
1924      * Returns the configured audio source sample rate in Hz.
1925      * The initial source sample rate depends on the constructor parameters,
1926      * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
1927      * If the constructor had a specific sample rate, then the initial sink sample rate is that
1928      * value.
1929      * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
1930      * then the initial sink sample rate is a route-dependent default value based on the source [sic].
1931      */
    public int getSampleRate() {
        // Source sample rate in Hz as configured; per the javadoc above it may
        // change if setPlaybackRate(int) is called.
        return mSampleRate;
    }
1935 
1936     /**
     * Returns the current playback sample rate in Hz.
1938      */
    public int getPlaybackRate() {
        // Live value queried from the native layer rather than a cached field.
        return native_get_playback_rate();
    }
1942 
1943     /**
1944      * Returns the current playback parameters.
1945      * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
1946      * @return current {@link PlaybackParams}.
1947      * @throws IllegalStateException if track is not initialized.
1948      */
    public @NonNull PlaybackParams getPlaybackParams() {
        // Delegates entirely to the native layer (which also raises the
        // IllegalStateException documented above for uninitialized tracks).
        return native_get_playback_params();
    }
1952 
1953     /**
1954      * Returns the {@link AudioAttributes} used in configuration.
1955      * If a {@code streamType} is used instead of an {@code AudioAttributes}
1956      * to configure the AudioTrack
1957      * (the use of {@code streamType} for configuration is deprecated),
1958      * then the {@code AudioAttributes}
1959      * equivalent to the {@code streamType} is returned.
1960      * @return The {@code AudioAttributes} used to configure the AudioTrack.
1961      * @throws IllegalStateException If the track is not initialized.
1962      */
getAudioAttributes()1963     public @NonNull AudioAttributes getAudioAttributes() {
1964         if (mState == STATE_UNINITIALIZED || mConfiguredAudioAttributes == null) {
1965             throw new IllegalStateException("track not initialized");
1966         }
1967         return mConfiguredAudioAttributes;
1968     }
1969 
1970     /**
1971      * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
1972      * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1973      */
    public int getAudioFormat() {
        // Encoding constant (AudioFormat.ENCODING_*) captured at configuration time.
        return mAudioFormat;
    }
1977 
1978     /**
1979      * Returns the volume stream type of this AudioTrack.
1980      * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
1981      * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
1982      * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
1983      * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
1984      * {@link AudioManager#STREAM_ACCESSIBILITY}.
1985      */
    public int getStreamType() {
        // Volume stream type (AudioManager.STREAM_*) captured at configuration time.
        return mStreamType;
    }
1989 
1990     /**
1991      * Returns the configured channel position mask.
1992      * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
1993      * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
1994      * This method may return {@link AudioFormat#CHANNEL_INVALID} if
1995      * a channel index mask was used. Consider
1996      * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
1997      * which contains both the channel position mask and the channel index mask.
1998      */
    public int getChannelConfiguration() {
        // Channel position mask; per the javadoc above this is CHANNEL_INVALID
        // when the track was configured with a channel index mask instead.
        return mChannelConfiguration;
    }
2002 
2003     /**
2004      * Returns the configured <code>AudioTrack</code> format.
2005      * @return an {@link AudioFormat} containing the
2006      * <code>AudioTrack</code> parameters at the time of configuration.
2007      */
getFormat()2008     public @NonNull AudioFormat getFormat() {
2009         AudioFormat.Builder builder = new AudioFormat.Builder()
2010             .setSampleRate(mSampleRate)
2011             .setEncoding(mAudioFormat);
2012         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
2013             builder.setChannelMask(mChannelConfiguration);
2014         }
2015         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
2016             builder.setChannelIndexMask(mChannelIndexMask);
2017         }
2018         return builder.build();
2019     }
2020 
2021     /**
2022      * Returns the configured number of channels.
2023      */
    public int getChannelCount() {
        // Channel count derived from the configured channel mask(s).
        return mChannelCount;
    }
2027 
2028     /**
2029      * Returns the state of the AudioTrack instance. This is useful after the
2030      * AudioTrack instance has been created to check if it was initialized
2031      * properly. This ensures that the appropriate resources have been acquired.
2032      * @see #STATE_UNINITIALIZED
2033      * @see #STATE_INITIALIZED
2034      * @see #STATE_NO_STATIC_DATA
2035      */
    public int getState() {
        // STATE_UNINITIALIZED / STATE_INITIALIZED / STATE_NO_STATIC_DATA.
        return mState;
    }
2039 
2040     /**
2041      * Returns the playback state of the AudioTrack instance.
2042      * @see #PLAYSTATE_STOPPED
2043      * @see #PLAYSTATE_PAUSED
2044      * @see #PLAYSTATE_PLAYING
2045      */
getPlayState()2046     public int getPlayState() {
2047         synchronized (mPlayStateLock) {
2048             switch (mPlayState) {
2049                 case PLAYSTATE_STOPPING:
2050                     return PLAYSTATE_PLAYING;
2051                 case PLAYSTATE_PAUSED_STOPPING:
2052                     return PLAYSTATE_PAUSED;
2053                 default:
2054                     return mPlayState;
2055             }
2056         }
2057     }
2058 
2059 
2060     /**
2061      * Returns the effective size of the <code>AudioTrack</code> buffer
2062      * that the application writes to.
2063      * <p> This will be less than or equal to the result of
2064      * {@link #getBufferCapacityInFrames()}.
2065      * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
2066      * <p> If the track is subsequently routed to a different output sink, the buffer
2067      * size and capacity may enlarge to accommodate.
2068      * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
2069      * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
2070      * the size of the <code>AudioTrack</code> buffer in bytes.
2071      * <p> See also {@link AudioManager#getProperty(String)} for key
2072      * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
2073      * @return current size in frames of the <code>AudioTrack</code> buffer.
2074      * @throws IllegalStateException if track is not initialized.
2075      */
    public @IntRange (from = 0) int getBufferSizeInFrames() {
        // Queried from the native layer each call: the effective size can change
        // after setBufferSizeInFrames() or a route change (see javadoc above).
        return native_get_buffer_size_frames();
    }
2079 
2080     /**
2081      * Limits the effective size of the <code>AudioTrack</code> buffer
2082      * that the application writes to.
2083      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
2084      * If a blocking write is used then the write will block until the data
2085      * can fit within this limit.
2086      * <p>Changing this limit modifies the latency associated with
2087      * the buffer for this track. A smaller size will give lower latency
2088      * but there may be more glitches due to buffer underruns.
2089      * <p>The actual size used may not be equal to this requested size.
2090      * It will be limited to a valid range with a maximum of
2091      * {@link #getBufferCapacityInFrames()}.
2092      * It may also be adjusted slightly for internal reasons.
2093      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
2094      * will be returned.
2095      * <p>This method is only supported for PCM audio.
2096      * It is not supported for compressed audio tracks.
2097      *
2098      * @param bufferSizeInFrames requested buffer size in frames
2099      * @return the actual buffer size in frames or an error code,
2100      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2101      * @throws IllegalStateException if track is not initialized.
2102      */
setBufferSizeInFrames(@ntRange from = 0) int bufferSizeInFrames)2103     public int setBufferSizeInFrames(@IntRange (from = 0) int bufferSizeInFrames) {
2104         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
2105             return ERROR_INVALID_OPERATION;
2106         }
2107         if (bufferSizeInFrames < 0) {
2108             return ERROR_BAD_VALUE;
2109         }
2110         return native_set_buffer_size_frames(bufferSizeInFrames);
2111     }
2112 
2113     /**
2114      *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
2115      *  <p> If the track's creation mode is {@link #MODE_STATIC},
2116      *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
2117      *  A static track's frame count will not change.
2118      *  <p> If the track's creation mode is {@link #MODE_STREAM},
2119      *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
2120      *  For streaming tracks, this value may be rounded up to a larger value if needed by
2121      *  the target output sink, and
2122      *  if the track is subsequently routed to a different output sink, the
2123      *  frame count may enlarge to accommodate.
2124      *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
2125      *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
2126      *  the size of the <code>AudioTrack</code> buffer in bytes.
2127      *  <p> See also {@link AudioManager#getProperty(String)} for key
2128      *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
2129      *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
2130      *  @throws IllegalStateException if track is not initialized.
2131      */
    public @IntRange (from = 0) int getBufferCapacityInFrames() {
        // Queried live: per the javadoc above, capacity can grow after a route change.
        return native_get_buffer_capacity_frames();
    }
2135 
2136     /**
2137      * Sets the streaming start threshold for an <code>AudioTrack</code>.
2138      * <p> The streaming start threshold is the buffer level that the written audio
2139      * data must reach for audio streaming to start after {@link #play()} is called.
2140      * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
2141      *
2142      * @param startThresholdInFrames the desired start threshold.
2143      * @return the actual start threshold in frames value. This is
     *         an integer between 1 and the buffer capacity
2145      *         (see {@link #getBufferCapacityInFrames()}),
2146      *         and might change if the output sink changes after track creation.
2147      * @throws IllegalStateException if the track is not initialized or the
2148      *         track transfer mode is not {@link #MODE_STREAM}.
2149      * @throws IllegalArgumentException if startThresholdInFrames is not positive.
2150      * @see #getStartThresholdInFrames()
2151      */
    public @IntRange(from = 1) int setStartThresholdInFrames(
            @IntRange (from = 1) int startThresholdInFrames) {
        // Validate preconditions in order: initialized, streaming mode, positive value.
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("AudioTrack is not initialized");
        }
        if (mDataLoadMode != MODE_STREAM) {
            throw new IllegalStateException("AudioTrack must be a streaming track");
        }
        if (startThresholdInFrames < 1) {
            throw new IllegalArgumentException("startThresholdInFrames "
                    + startThresholdInFrames + " must be positive");
        }
        // The native layer clamps to [1, buffer capacity] and returns the value in use.
        return native_setStartThresholdInFrames(startThresholdInFrames);
    }
2166 
2167     /**
2168      * Returns the streaming start threshold of the <code>AudioTrack</code>.
2169      * <p> The streaming start threshold is the buffer level that the written audio
2170      * data must reach for audio streaming to start after {@link #play()} is called.
2171      * When an <code>AudioTrack</code> is created, the streaming start threshold
2172      * is the buffer capacity in frames. If the buffer size in frames is reduced
2173      * by {@link #setBufferSizeInFrames(int)} to a value smaller than the start threshold
2174      * then that value will be used instead for the streaming start threshold.
2175      * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
2176      *
2177      * @return the current start threshold in frames value. This is
     *         an integer between 1 and the buffer capacity
2179      *         (see {@link #getBufferCapacityInFrames()}),
2180      *         and might change if the  output sink changes after track creation.
2181      * @throws IllegalStateException if the track is not initialized or the
2182      *         track is not {@link #MODE_STREAM}.
2183      * @see #setStartThresholdInFrames(int)
2184      */
    public @IntRange (from = 1) int getStartThresholdInFrames() {
        // Same preconditions as setStartThresholdInFrames(): initialized + streaming.
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("AudioTrack is not initialized");
        }
        if (mDataLoadMode != MODE_STREAM) {
            throw new IllegalStateException("AudioTrack must be a streaming track");
        }
        return native_getStartThresholdInFrames();
    }
2194 
2195     /**
2196      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
2197      *  @return current size in frames of the <code>AudioTrack</code> buffer.
2198      *  @throws IllegalStateException
2199      *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
2200      */
    @Deprecated
    protected int getNativeFrameCount() {
        // NOTE(review): delegates to the buffer *capacity* query, while the
        // @deprecated tag points callers at getBufferSizeInFrames() (the *size*
        // query). Historical behavior, kept as-is.
        return native_get_buffer_capacity_frames();
    }
2205 
2206     /**
2207      * Returns marker position expressed in frames.
2208      * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
2209      * or zero if marker is disabled.
2210      */
    public int getNotificationMarkerPosition() {
        // Zero means the marker is disabled (see javadoc above).
        return native_get_marker_pos();
    }
2214 
2215     /**
2216      * Returns the notification update period expressed in frames.
2217      * Zero means that no position update notifications are being delivered.
2218      */
    public int getPositionNotificationPeriod() {
        // Zero means periodic position notifications are disabled.
        return native_get_pos_update_period();
    }
2222 
2223     /**
2224      * Returns the playback head position expressed in frames.
2225      * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
2226      * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
2227      * This is a continuously advancing counter.  It will wrap (overflow) periodically,
2228      * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
2229      * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
2230      * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
2231      * the total number of frames played since reset,
2232      * <i>not</i> the current offset within the buffer.
2233      */
    public int getPlaybackHeadPosition() {
        // Wrapping frame counter; callers must reinterpret the int as unsigned
        // 32 bits (see javadoc above).
        return native_get_position();
    }
2237 
2238     /**
2239      * Returns this track's estimated latency in milliseconds. This includes the latency due
2240      * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
2241      *
2242      * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
2243      * a better solution.
2244      * @hide
2245      */
    @UnsupportedAppUsage(trackingBug = 130237544)
    public int getLatency() {
        // Estimated end-to-end latency in ms from the native layer. Deliberately
        // hidden; see the DO NOT UNHIDE note in the javadoc above.
        return native_get_latency();
    }
2250 
2251     /**
2252      * Returns the number of underrun occurrences in the application-level write buffer
2253      * since the AudioTrack was created.
2254      * An underrun occurs if the application does not write audio
2255      * data quickly enough, causing the buffer to underflow
2256      * and a potential audio glitch or pop.
2257      * <p>
2258      * Underruns are less likely when buffer sizes are large.
2259      * It may be possible to eliminate underruns by recreating the AudioTrack with
2260      * a larger buffer.
2261      * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
2262      * effective size of the buffer.
2263      */
    public int getUnderrunCount() {
        // Cumulative underrun count since track creation, tracked natively.
        return native_get_underrun_count();
    }
2267 
2268     /**
2269      * Returns the current performance mode of the {@link AudioTrack}.
2270      *
2271      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
2272      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
2273      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
2274      * Use {@link AudioTrack.Builder#setPerformanceMode}
2275      * in the {@link AudioTrack.Builder} to enable a performance mode.
2276      * @throws IllegalStateException if track is not initialized.
2277      */
getPerformanceMode()2278     public @PerformanceMode int getPerformanceMode() {
2279         final int flags = native_get_flags();
2280         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
2281             return PERFORMANCE_MODE_LOW_LATENCY;
2282         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
2283             return PERFORMANCE_MODE_POWER_SAVING;
2284         } else {
2285             return PERFORMANCE_MODE_NONE;
2286         }
2287     }
2288 
2289     /**
2290      *  Returns the output sample rate in Hz for the specified stream type.
2291      */
getNativeOutputSampleRate(int streamType)2292     static public int getNativeOutputSampleRate(int streamType) {
2293         return native_get_output_sample_rate(streamType);
2294     }
2295 
2296     /**
2297      * Returns the estimated minimum buffer size required for an AudioTrack
2298      * object to be created in the {@link #MODE_STREAM} mode.
2299      * The size is an estimate because it does not consider either the route or the sink,
2300      * since neither is known yet.  Note that this size doesn't
2301      * guarantee a smooth playback under load, and higher values should be chosen according to
2302      * the expected frequency at which the buffer will be refilled with additional data to play.
2303      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
2304      * to a higher value than the initial source sample rate, be sure to configure the buffer size
2305      * based on the highest planned sample rate.
2306      * @param sampleRateInHz the source sample rate expressed in Hz.
2307      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
2308      * @param channelConfig describes the configuration of the audio channels.
2309      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
2310      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
2311      * @param audioFormat the format in which the audio data is represented.
2312      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
2313      *   {@link AudioFormat#ENCODING_PCM_8BIT},
2314      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
2315      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
2316      *   or {@link #ERROR} if unable to query for output properties,
2317      *   or the minimum buffer size expressed in bytes.
2318      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)2319     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
2320         int channelCount = 0;
2321         switch(channelConfig) {
2322         case AudioFormat.CHANNEL_OUT_MONO:
2323         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
2324             channelCount = 1;
2325             break;
2326         case AudioFormat.CHANNEL_OUT_STEREO:
2327         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
2328             channelCount = 2;
2329             break;
2330         default:
2331             if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
2332                 loge("getMinBufferSize(): Invalid channel configuration.");
2333                 return ERROR_BAD_VALUE;
2334             } else {
2335                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
2336             }
2337         }
2338 
2339         if (!AudioFormat.isPublicEncoding(audioFormat)) {
2340             loge("getMinBufferSize(): Invalid audio format.");
2341             return ERROR_BAD_VALUE;
2342         }
2343 
2344         // sample rate, note these values are subject to change
2345         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
2346         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
2347                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
2348             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
2349             return ERROR_BAD_VALUE;
2350         }
2351 
2352         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
2353         if (size <= 0) {
2354             loge("getMinBufferSize(): error querying hardware");
2355             return ERROR;
2356         }
2357         else {
2358             return size;
2359         }
2360     }
2361 
2362     /**
2363      * Returns the audio session ID.
2364      *
2365      * @return the ID of the audio session this AudioTrack belongs to.
2366      */
    public int getAudioSessionId() {
        // Session id assigned at construction; constant for the track's lifetime.
        return mSessionId;
    }
2370 
2371    /**
2372     * Poll for a timestamp on demand.
2373     * <p>
2374     * If you need to track timestamps during initial warmup or after a routing or mode change,
2375     * you should request a new timestamp periodically until the reported timestamps
2376     * show that the frame position is advancing, or until it becomes clear that
2377     * timestamps are unavailable for this route.
2378     * <p>
2379     * After the clock is advancing at a stable rate,
2380     * query for a new timestamp approximately once every 10 seconds to once per minute.
2381     * Calling this method more often is inefficient.
2382     * It is also counter-productive to call this method more often than recommended,
2383     * because the short-term differences between successive timestamp reports are not meaningful.
2384     * If you need a high-resolution mapping between frame position and presentation time,
2385     * consider implementing that at application level, based on low-resolution timestamps.
2386     * <p>
2387     * The audio data at the returned position may either already have been
2388     * presented, or may have not yet been presented but is committed to be presented.
2389     * It is not possible to request the time corresponding to a particular position,
2390     * or to request the (fractional) position corresponding to a particular time.
2391     * If you need such features, consider implementing them at application level.
2392     *
2393     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2394     *        and owned by caller.
2395     * @return true if a timestamp is available, or false if no timestamp is available.
2396     *         If a timestamp is available,
2397     *         the AudioTimestamp instance is filled in with a position in frame units, together
2398     *         with the estimated time when that frame was presented or is committed to
2399     *         be presented.
2400     *         In the case that no timestamp is available, any supplied instance is left unaltered.
2401     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
2402     *         or during and immediately after a route change.
2403     *         A timestamp is permanently unavailable for a given route if the route does not support
2404     *         timestamps.  In this case, the approximate frame position can be obtained
2405     *         using {@link #getPlaybackHeadPosition}.
2406     *         However, it may be useful to continue to query for
2407     *         timestamps occasionally, to recover after a route change.
2408     */
2409     // Add this text when the "on new timestamp" API is added:
2410     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)2411     public boolean getTimestamp(AudioTimestamp timestamp)
2412     {
2413         if (timestamp == null) {
2414             throw new IllegalArgumentException();
2415         }
2416         // It's unfortunate, but we have to either create garbage every time or use synchronized
2417         long[] longArray = new long[2];
2418         int ret = native_get_timestamp(longArray);
2419         if (ret != SUCCESS) {
2420             return false;
2421         }
2422         timestamp.framePosition = longArray[0];
2423         timestamp.nanoTime = longArray[1];
2424         return true;
2425     }
2426 
2427     /**
2428      * Poll for a timestamp on demand.
2429      * <p>
2430      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
2431      *
2432      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2433      *        and owned by caller.
2434      * @return {@link #SUCCESS} if a timestamp is available
2435      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
2436      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
2437      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
2438      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
2439      *         for the timestamp.
2440      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2441      *         needs to be recreated.
2442      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
2443      *         timestamps. In this case, the approximate frame position can be obtained
2444      *         using {@link #getPlaybackHeadPosition}.
2445      *
2446      *         The AudioTimestamp instance is filled in with a position in frame units, together
2447      *         with the estimated time when that frame was presented or is committed to
2448      *         be presented.
2449      * @hide
2450      */
2451      // Add this text when the "on new timestamp" API is added:
2452      //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestampWithStatus(AudioTimestamp timestamp)2453      public int getTimestampWithStatus(AudioTimestamp timestamp)
2454      {
2455          if (timestamp == null) {
2456              throw new IllegalArgumentException();
2457          }
2458          // It's unfortunate, but we have to either create garbage every time or use synchronized
2459          long[] longArray = new long[2];
2460          int ret = native_get_timestamp(longArray);
2461          timestamp.framePosition = longArray[0];
2462          timestamp.nanoTime = longArray[1];
2463          return ret;
2464      }
2465 
2466     /**
2467      *  Return Metrics data about the current AudioTrack instance.
2468      *
2469      * @return a {@link PersistableBundle} containing the set of attributes and values
2470      * available for the media being handled by this instance of AudioTrack
     * The attributes are described in {@link MetricsConstants}.
2472      *
2473      * Additional vendor-specific fields may also be present in
2474      * the return value.
2475      */
getMetrics()2476     public PersistableBundle getMetrics() {
2477         PersistableBundle bundle = native_getMetrics();
2478         return bundle;
2479     }
2480 
    /** Native backing for {@link #getMetrics()}. */
    private native PersistableBundle native_getMetrics();
2482 
2483     //--------------------------------------------------------------------------
2484     // Initialization / configuration
2485     //--------------------
2486     /**
2487      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2488      * for each periodic playback head position update.
2489      * Notifications will be received in the same thread as the one in which the AudioTrack
2490      * instance was created.
2491      * @param listener
2492      */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Convenience overload: a null handler means events are delivered on the
        // thread that created this AudioTrack (see javadoc above).
        setPlaybackPositionUpdateListener(listener, null);
    }
2496 
2497     /**
2498      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2499      * for each periodic playback head position update.
2500      * Use this method to receive AudioTrack events in the Handler associated with another
2501      * thread than the one in which you created the AudioTrack instance.
2502      * @param listener
2503      * @param handler the Handler that will receive the event notification messages.
2504      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)2505     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
2506                                                     Handler handler) {
2507         if (listener != null) {
2508             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
2509         } else {
2510             mEventHandlerDelegate = null;
2511         }
2512     }
2513 
2514 
clampGainOrLevel(float gainOrLevel)2515     private static float clampGainOrLevel(float gainOrLevel) {
2516         if (Float.isNaN(gainOrLevel)) {
2517             throw new IllegalArgumentException();
2518         }
2519         if (gainOrLevel < GAIN_MIN) {
2520             gainOrLevel = GAIN_MIN;
2521         } else if (gainOrLevel > GAIN_MAX) {
2522             gainOrLevel = GAIN_MAX;
2523         }
2524         return gainOrLevel;
2525     }
2526 
2527 
2528      /**
2529      * Sets the specified left and right output gain values on the AudioTrack.
2530      * <p>Gain values are clamped to the closed interval [0.0, max] where
2531      * max is the value of {@link #getMaxVolume}.
2532      * A value of 0.0 results in zero gain (silence), and
2533      * a value of 1.0 means unity gain (signal unchanged).
2534      * The default value is 1.0 meaning unity gain.
2535      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2536      * @param leftGain output gain for the left channel.
2537      * @param rightGain output gain for the right channel
2538      * @return error code or success, see {@link #SUCCESS},
2539      *    {@link #ERROR_INVALID_OPERATION}
2540      * @deprecated Applications should use {@link #setVolume} instead, as it
2541      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2542      */
2543     @Deprecated
setStereoVolume(float leftGain, float rightGain)2544     public int setStereoVolume(float leftGain, float rightGain) {
2545         if (mState == STATE_UNINITIALIZED) {
2546             return ERROR_INVALID_OPERATION;
2547         }
2548 
2549         baseSetVolume(leftGain, rightGain);
2550         return SUCCESS;
2551     }
2552 
2553     @Override
playerSetVolume(boolean muting, float leftVolume, float rightVolume)2554     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
2555         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
2556         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
2557 
2558         native_setVolume(leftVolume, rightVolume);
2559     }
2560 
2561 
2562     /**
2563      * Sets the specified output gain value on all channels of this track.
2564      * <p>Gain values are clamped to the closed interval [0.0, max] where
2565      * max is the value of {@link #getMaxVolume}.
2566      * A value of 0.0 results in zero gain (silence), and
2567      * a value of 1.0 means unity gain (signal unchanged).
2568      * The default value is 1.0 meaning unity gain.
2569      * <p>This API is preferred over {@link #setStereoVolume}, as it
2570      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2571      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2572      * @param gain output gain for all channels.
2573      * @return error code or success, see {@link #SUCCESS},
2574      *    {@link #ERROR_INVALID_OPERATION}
2575      */
    public int setVolume(float gain) {
        // Delegates to the stereo variant, applying the same gain to both channels.
        return setStereoVolume(gain, gain);
    }
2579 
    @Override
    /* package */ int playerApplyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation) {
        // PlayerBase hook: forwards the shaper configuration and operation
        // directly to the native layer; the return code comes from native.
        return native_applyVolumeShaper(configuration, operation);
    }
2586 
    @Override
    /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
        // PlayerBase hook: queries the native layer for the state of shaper `id`;
        // may return null (see @Nullable) per the native implementation.
        return native_getVolumeShaperState(id);
    }
2591 
2592     @Override
createVolumeShaper( @onNull VolumeShaper.Configuration configuration)2593     public @NonNull VolumeShaper createVolumeShaper(
2594             @NonNull VolumeShaper.Configuration configuration) {
2595         return new VolumeShaper(configuration, this);
2596     }
2597 
2598     /**
2599      * Sets the playback sample rate for this track. This sets the sampling rate at which
2600      * the audio data will be consumed and played back
2601      * (as set by the sampleRateInHz parameter in the
2602      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
2603      * not the original sampling rate of the
2604      * content. For example, setting it to half the sample rate of the content will cause the
2605      * playback to last twice as long, but will also result in a pitch shift down by one octave.
2606      * The valid sample rate range is from 1 Hz to twice the value returned by
2607      * {@link #getNativeOutputSampleRate(int)}.
2608      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
2609      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
2610      * for playback of content of differing sample rate,
2611      * but with identical encoding and channel mask.
2612      * @param sampleRateInHz the sample rate expressed in Hz
2613      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2614      *    {@link #ERROR_INVALID_OPERATION}
2615      */
setPlaybackRate(int sampleRateInHz)2616     public int setPlaybackRate(int sampleRateInHz) {
2617         if (mState != STATE_INITIALIZED) {
2618             return ERROR_INVALID_OPERATION;
2619         }
2620         if (sampleRateInHz <= 0) {
2621             return ERROR_BAD_VALUE;
2622         }
2623         return native_set_playback_rate(sampleRateInHz);
2624     }
2625 
2626 
2627     /**
2628      * Sets the playback parameters.
2629      * This method returns failure if it cannot apply the playback parameters.
2630      * One possible cause is that the parameters for speed or pitch are out of range.
2631      * Another possible cause is that the <code>AudioTrack</code> is streaming
2632      * (see {@link #MODE_STREAM}) and the
2633      * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
2634      * on configuration must be larger than the speed multiplied by the minimum size
2635      * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
2636      * @param params see {@link PlaybackParams}. In particular,
2637      * speed, pitch, and audio mode should be set.
2638      * @throws IllegalArgumentException if the parameters are invalid or not accepted.
2639      * @throws IllegalStateException if track is not initialized.
2640      */
setPlaybackParams(@onNull PlaybackParams params)2641     public void setPlaybackParams(@NonNull PlaybackParams params) {
2642         if (params == null) {
2643             throw new IllegalArgumentException("params is null");
2644         }
2645         native_set_playback_params(params);
2646     }
2647 
2648 
2649     /**
2650      * Sets the position of the notification marker.  At most one marker can be active.
2651      * @param markerInFrames marker position in wrapping frame units similar to
2652      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
2653      * To set a marker at a position which would appear as zero due to wraparound,
2654      * a workaround is to use a non-zero position near zero, such as -1 or 1.
2655      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2656      *  {@link #ERROR_INVALID_OPERATION}
2657      */
setNotificationMarkerPosition(int markerInFrames)2658     public int setNotificationMarkerPosition(int markerInFrames) {
2659         if (mState == STATE_UNINITIALIZED) {
2660             return ERROR_INVALID_OPERATION;
2661         }
2662         return native_set_marker_pos(markerInFrames);
2663     }
2664 
2665 
2666     /**
2667      * Sets the period for the periodic notification event.
2668      * @param periodInFrames update period expressed in frames.
2669      * Zero period means no position updates.  A negative period is not allowed.
2670      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
2671      */
setPositionNotificationPeriod(int periodInFrames)2672     public int setPositionNotificationPeriod(int periodInFrames) {
2673         if (mState == STATE_UNINITIALIZED) {
2674             return ERROR_INVALID_OPERATION;
2675         }
2676         return native_set_pos_update_period(periodInFrames);
2677     }
2678 
2679 
2680     /**
2681      * Sets the playback head position within the static buffer.
2682      * The track must be stopped or paused for the position to be changed,
2683      * and must use the {@link #MODE_STATIC} mode.
2684      * @param positionInFrames playback head position within buffer, expressed in frames.
2685      * Zero corresponds to start of buffer.
2686      * The position must not be greater than the buffer size in frames, or negative.
2687      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
2688      * the position values have different meanings.
2689      * <br>
2690      * If looping is currently enabled and the new position is greater than or equal to the
2691      * loop end marker, the behavior varies by API level:
2692      * as of {@link android.os.Build.VERSION_CODES#M},
2693      * the looping is first disabled and then the position is set.
2694      * For earlier API levels, the behavior is unspecified.
2695      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2696      *    {@link #ERROR_INVALID_OPERATION}
2697      */
setPlaybackHeadPosition(@ntRange from = 0) int positionInFrames)2698     public int setPlaybackHeadPosition(@IntRange (from = 0) int positionInFrames) {
2699         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2700                 getPlayState() == PLAYSTATE_PLAYING) {
2701             return ERROR_INVALID_OPERATION;
2702         }
2703         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
2704             return ERROR_BAD_VALUE;
2705         }
2706         return native_set_position(positionInFrames);
2707     }
2708 
2709     /**
2710      * Sets the loop points and the loop count. The loop can be infinite.
2711      * Similarly to setPlaybackHeadPosition,
2712      * the track must be stopped or paused for the loop points to be changed,
2713      * and must use the {@link #MODE_STATIC} mode.
2714      * @param startInFrames loop start marker expressed in frames.
2715      * Zero corresponds to start of buffer.
2716      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
2717      * @param endInFrames loop end marker expressed in frames.
2718      * The total buffer size in frames corresponds to end of buffer.
2719      * The end marker must not be greater than the buffer size in frames.
2720      * For looping, the end marker must not be less than or equal to the start marker,
2721      * but to disable looping
2722      * it is permitted for start marker, end marker, and loop count to all be 0.
2723      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
2724      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
2725      * support,
2726      * {@link #ERROR_BAD_VALUE} is returned.
2727      * The loop range is the interval [startInFrames, endInFrames).
2728      * <br>
2729      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
2730      * unless it is greater than or equal to the loop end marker, in which case
2731      * it is forced to the loop start marker.
2732      * For earlier API levels, the effect on position is unspecified.
2733      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
2734      *    A value of -1 means infinite looping, and 0 disables looping.
2735      *    A value of positive N means to "loop" (go back) N times.  For example,
2736      *    a value of one means to play the region two times in total.
2737      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2738      *    {@link #ERROR_INVALID_OPERATION}
2739      */
setLoopPoints(@ntRange from = 0) int startInFrames, @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount)2740     public int setLoopPoints(@IntRange (from = 0) int startInFrames,
2741             @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount) {
2742         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2743                 getPlayState() == PLAYSTATE_PLAYING) {
2744             return ERROR_INVALID_OPERATION;
2745         }
2746         if (loopCount == 0) {
2747             ;   // explicitly allowed as an exception to the loop region range check
2748         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
2749                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
2750             return ERROR_BAD_VALUE;
2751         }
2752         return native_set_loop(startInFrames, endInFrames, loopCount);
2753     }
2754 
2755     /**
2756      * Sets the audio presentation.
2757      * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned.
2758      * If a multi-stream decoder (MSD) is not present, or the format does not support
2759      * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned.
2760      * {@link #ERROR} is returned in case of any other error.
2761      * @param presentation see {@link AudioPresentation}. In particular, id should be set.
2762      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR},
2763      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2764      * @throws IllegalArgumentException if the audio presentation is null.
2765      * @throws IllegalStateException if track is not initialized.
2766      */
setPresentation(@onNull AudioPresentation presentation)2767     public int setPresentation(@NonNull AudioPresentation presentation) {
2768         if (presentation == null) {
2769             throw new IllegalArgumentException("audio presentation is null");
2770         }
2771         return native_setPresentation(presentation.getPresentationId(),
2772                 presentation.getProgramId());
2773     }
2774 
2775     /**
2776      * Sets the initialization state of the instance. This method was originally intended to be used
2777      * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
2778      * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
2779      * @param state the state of the AudioTrack instance
2780      * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
2781      */
    @Deprecated
    protected void setState(int state) {
        // Directly overwrites the initialization state with no validation;
        // retained only for legacy subclasses.
        mState = state;
    }
2786 
2787 
2788     //---------------------------------------------------------
2789     // Transport control methods
2790     //--------------------
2791     /**
2792      * Starts playing an AudioTrack.
2793      * <p>
2794      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
2795      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
2796      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
2797      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
2798      * play().
2799      * <p>
2800      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
2801      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
2802      * If you don't call write() first, or if you call write() but with an insufficient amount of
2803      * data, then the track will be in underrun state at play().  In this case,
2804      * playback will not actually start playing until the data path is filled to a
2805      * device-specific minimum level.  This requirement for the path to be filled
2806      * to a minimum level is also true when resuming audio playback after calling stop().
2807      * Similarly the buffer will need to be filled up again after
2808      * the track underruns due to failure to call write() in a timely manner with sufficient data.
2809      * For portability, an application should prime the data path to the maximum allowed
2810      * by writing data until the write() method returns a short transfer count.
2811      * This allows play() to start immediately, and reduces the chance of underrun.
2812      *
2813      * @throws IllegalStateException if the track isn't properly initialized
2814      */
play()2815     public void play()
2816     throws IllegalStateException {
2817         if (mState != STATE_INITIALIZED) {
2818             throw new IllegalStateException("play() called on uninitialized AudioTrack.");
2819         }
2820         //FIXME use lambda to pass startImpl to superclass
2821         final int delay = getStartDelayMs();
2822         if (delay == 0) {
2823             startImpl();
2824         } else {
2825             new Thread() {
2826                 public void run() {
2827                     try {
2828                         Thread.sleep(delay);
2829                     } catch (InterruptedException e) {
2830                         e.printStackTrace();
2831                     }
2832                     baseSetStartDelayMs(0);
2833                     try {
2834                         startImpl();
2835                     } catch (IllegalStateException e) {
2836                         // fail silently for a state exception when it is happening after
2837                         // a delayed start, as the player state could have changed between the
2838                         // call to start() and the execution of startImpl()
2839                     }
2840                 }
2841             }.start();
2842         }
2843     }
2844 
    // Performs the actual native start and play-state transition for play().
    private void startImpl() {
        // Lazily enable self routing monitoring if it is not already active.
        synchronized (mRoutingChangeListeners) {
            if (!mEnableSelfRoutingMonitor) {
                mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
            }
        }
        synchronized(mPlayStateLock) {
            baseStart(0); // unknown device at this point
            native_start();
            // FIXME see b/179218630
            //baseStart(native_getRoutedDeviceId());
            // A track paused while stopping resumes into STOPPING; otherwise it
            // enters PLAYING and any pending offload EOS flag is cleared.
            if (mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PLAYING;
                mOffloadEosPending = false;
            }
        }
    }
2864 
2865     /**
2866      * Stops playing the audio data.
2867      * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
2868      * after the last buffer that was written has been played. For an immediate stop, use
2869      * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
2870      * back yet.
2871      * @throws IllegalStateException
2872      */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            native_stop();
            baseStop();
            // An offloaded track that is not already pause-stopping transitions to
            // STOPPING; otherwise the track is fully stopped: AV sync bookkeeping
            // is reset and any thread waiting on mPlayStateLock is woken.
            if (mOffloaded && mPlayState != PLAYSTATE_PAUSED_STOPPING) {
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_STOPPED;
                mOffloadEosPending = false;
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                mPlayStateLock.notify();
            }
        }
        tryToDisableNativeRoutingCallback();
    }
2895 
2896     /**
2897      * Pauses the playback of the audio data. Data that has not been played
2898      * back will not be discarded. Subsequent calls to {@link #play} will play
2899      * this data back. See {@link #flush()} to discard this data.
2900      *
2901      * @throws IllegalStateException
2902      */
pause()2903     public void pause()
2904     throws IllegalStateException {
2905         if (mState != STATE_INITIALIZED) {
2906             throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
2907         }
2908 
2909         // pause playback
2910         synchronized(mPlayStateLock) {
2911             native_pause();
2912             basePause();
2913             if (mPlayState == PLAYSTATE_STOPPING) {
2914                 mPlayState = PLAYSTATE_PAUSED_STOPPING;
2915             } else {
2916                 mPlayState = PLAYSTATE_PAUSED;
2917             }
2918         }
2919     }
2920 
2921 
2922     //---------------------------------------------------------
2923     // Audio data supply
2924     //--------------------
2925 
2926     /**
2927      * Flushes the audio data currently queued for playback. Any data that has
2928      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
2929      * or if the track's creation mode is not {@link #MODE_STREAM}.
2930      * <BR> Note that although data written but not yet presented is discarded, there is no
2931      * guarantee that all of the buffer space formerly used by that data
2932      * is available for a subsequent write.
2933      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
2934      * less than or equal to the total buffer size
2935      * may return a short actual transfer count.
2936      */
flush()2937     public void flush() {
2938         if (mState == STATE_INITIALIZED) {
2939             // flush the data in native layer
2940             native_flush();
2941             mAvSyncHeader = null;
2942             mAvSyncBytesRemaining = 0;
2943         }
2944 
2945     }
2946 
2947     /**
2948      * Writes the audio data to the audio sink for playback (streaming mode),
2949      * or copies audio data for later playback (static buffer mode).
2950      * The format specified in the AudioTrack constructor should be
2951      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
2952      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
2953      * <p>
2954      * In streaming mode, the write will normally block until all the data has been enqueued for
2955      * playback, and will return a full transfer count.  However, if the track is stopped or paused
2956      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
2957      * occurs during the write, then the write may return a short transfer count.
2958      * <p>
2959      * In static buffer mode, copies the data to the buffer starting at offset 0.
2960      * Note that the actual playback of this data might occur after this function returns.
2961      *
2962      * @param audioData the array that holds the data to play.
2963      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
2964      *    starts.
2965      *    Must not be negative, or cause the data access to go out of bounds of the array.
2966      * @param sizeInBytes the number of bytes to write in audioData after the offset.
2967      *    Must not be negative, or cause the data access to go out of bounds of the array.
2968      * @return zero or the positive number of bytes that were written, or one of the following
2969      *    error codes. The number of bytes will be a multiple of the frame size in bytes
2970      *    not to exceed sizeInBytes.
2971      * <ul>
2972      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2973      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2974      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2975      *    needs to be recreated. The dead object error code is not returned if some data was
2976      *    successfully transferred. In this case, the error is returned at the next write()</li>
2977      * <li>{@link #ERROR} in case of other error</li>
2978      * </ul>
2979      * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
2980      * set to  {@link #WRITE_BLOCKING}.
2981      */
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
        // Convenience overload: blocking byte-array write.
        return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
    }
2985 
2986     /**
2987      * Writes the audio data to the audio sink for playback (streaming mode),
2988      * or copies audio data for later playback (static buffer mode).
2989      * The format specified in the AudioTrack constructor should be
2990      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
2991      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
2992      * <p>
2993      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2994      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2995      * for playback, and will return a full transfer count.  However, if the write mode is
2996      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2997      * interrupts the write by calling stop or pause, or an I/O error
2998      * occurs during the write, then the write may return a short transfer count.
2999      * <p>
3000      * In static buffer mode, copies the data to the buffer starting at offset 0,
3001      * and the write mode is ignored.
3002      * Note that the actual playback of this data might occur after this function returns.
3003      *
3004      * @param audioData the array that holds the data to play.
3005      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
3006      *    starts.
3007      *    Must not be negative, or cause the data access to go out of bounds of the array.
3008      * @param sizeInBytes the number of bytes to write in audioData after the offset.
3009      *    Must not be negative, or cause the data access to go out of bounds of the array.
3010      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3011      *     effect in static mode.
3012      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3013      *         to the audio sink.
3014      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3015      *     queuing as much audio data for playback as possible without blocking.
3016      * @return zero or the positive number of bytes that were written, or one of the following
3017      *    error codes. The number of bytes will be a multiple of the frame size in bytes
3018      *    not to exceed sizeInBytes.
3019      * <ul>
3020      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3021      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3022      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3023      *    needs to be recreated. The dead object error code is not returned if some data was
3024      *    successfully transferred. In this case, the error is returned at the next write()</li>
3025      * <li>{@link #ERROR} in case of other error</li>
3026      * </ul>
3027      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)3028     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
3029             @WriteMode int writeMode) {
3030         // Note: we allow writes of extended integers and compressed formats from a byte array.
3031         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
3032             return ERROR_INVALID_OPERATION;
3033         }
3034 
3035         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3036             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3037             return ERROR_BAD_VALUE;
3038         }
3039 
3040         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
3041                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
3042                 || (offsetInBytes + sizeInBytes > audioData.length)) {
3043             return ERROR_BAD_VALUE;
3044         }
3045 
3046         if (!blockUntilOffloadDrain(writeMode)) {
3047             return 0;
3048         }
3049 
3050         final int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
3051                 writeMode == WRITE_BLOCKING);
3052 
3053         if ((mDataLoadMode == MODE_STATIC)
3054                 && (mState == STATE_NO_STATIC_DATA)
3055                 && (ret > 0)) {
3056             // benign race with respect to other APIs that read mState
3057             mState = STATE_INITIALIZED;
3058         }
3059 
3060         return ret;
3061     }
3062 
3063     /**
3064      * Writes the audio data to the audio sink for playback (streaming mode),
3065      * or copies audio data for later playback (static buffer mode).
3066      * The format specified in the AudioTrack constructor should be
3067      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3068      * <p>
3069      * In streaming mode, the write will normally block until all the data has been enqueued for
3070      * playback, and will return a full transfer count.  However, if the track is stopped or paused
3071      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
3072      * occurs during the write, then the write may return a short transfer count.
3073      * <p>
3074      * In static buffer mode, copies the data to the buffer starting at offset 0.
3075      * Note that the actual playback of this data might occur after this function returns.
3076      *
3077      * @param audioData the array that holds the data to play.
3078      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
3079      *     starts.
3080      *    Must not be negative, or cause the data access to go out of bounds of the array.
3081      * @param sizeInShorts the number of shorts to read in audioData after the offset.
3082      *    Must not be negative, or cause the data access to go out of bounds of the array.
3083      * @return zero or the positive number of shorts that were written, or one of the following
3084      *    error codes. The number of shorts will be a multiple of the channel count not to
3085      *    exceed sizeInShorts.
3086      * <ul>
3087      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3088      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3089      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3090      *    needs to be recreated. The dead object error code is not returned if some data was
3091      *    successfully transferred. In this case, the error is returned at the next write()</li>
3092      * <li>{@link #ERROR} in case of other error</li>
3093      * </ul>
3094      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
3095      * set to  {@link #WRITE_BLOCKING}.
3096      */
    public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
        // Convenience overload: blocking short-array write.
        return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
    }
3100 
3101     /**
3102      * Writes the audio data to the audio sink for playback (streaming mode),
3103      * or copies audio data for later playback (static buffer mode).
3104      * The format specified in the AudioTrack constructor should be
3105      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3106      * <p>
3107      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3108      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3109      * for playback, and will return a full transfer count.  However, if the write mode is
3110      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3111      * interrupts the write by calling stop or pause, or an I/O error
3112      * occurs during the write, then the write may return a short transfer count.
3113      * <p>
3114      * In static buffer mode, copies the data to the buffer starting at offset 0.
3115      * Note that the actual playback of this data might occur after this function returns.
3116      *
3117      * @param audioData the array that holds the data to write.
3118      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
3119      *     starts.
3120      *    Must not be negative, or cause the data access to go out of bounds of the array.
3121      * @param sizeInShorts the number of shorts to read in audioData after the offset.
3122      *    Must not be negative, or cause the data access to go out of bounds of the array.
3123      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3124      *     effect in static mode.
3125      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3126      *         to the audio sink.
3127      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3128      *     queuing as much audio data for playback as possible without blocking.
3129      * @return zero or the positive number of shorts that were written, or one of the following
3130      *    error codes. The number of shorts will be a multiple of the channel count not to
3131      *    exceed sizeInShorts.
3132      * <ul>
3133      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3134      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3135      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3136      *    needs to be recreated. The dead object error code is not returned if some data was
3137      *    successfully transferred. In this case, the error is returned at the next write()</li>
3138      * <li>{@link #ERROR} in case of other error</li>
3139      * </ul>
3140      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)3141     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
3142             @WriteMode int writeMode) {
3143 
3144         if (mState == STATE_UNINITIALIZED
3145                 || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT
3146                 // use ByteBuffer or byte[] instead for later encodings
3147                 || mAudioFormat > AudioFormat.ENCODING_LEGACY_SHORT_ARRAY_THRESHOLD) {
3148             return ERROR_INVALID_OPERATION;
3149         }
3150 
3151         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3152             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3153             return ERROR_BAD_VALUE;
3154         }
3155 
3156         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
3157                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
3158                 || (offsetInShorts + sizeInShorts > audioData.length)) {
3159             return ERROR_BAD_VALUE;
3160         }
3161 
3162         if (!blockUntilOffloadDrain(writeMode)) {
3163             return 0;
3164         }
3165 
3166         final int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
3167                 writeMode == WRITE_BLOCKING);
3168 
3169         if ((mDataLoadMode == MODE_STATIC)
3170                 && (mState == STATE_NO_STATIC_DATA)
3171                 && (ret > 0)) {
3172             // benign race with respect to other APIs that read mState
3173             mState = STATE_INITIALIZED;
3174         }
3175 
3176         return ret;
3177     }
3178 
3179     /**
3180      * Writes the audio data to the audio sink for playback (streaming mode),
3181      * or copies audio data for later playback (static buffer mode).
3182      * The format specified in the AudioTrack constructor should be
3183      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
3184      * <p>
3185      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3186      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3187      * for playback, and will return a full transfer count.  However, if the write mode is
3188      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3189      * interrupts the write by calling stop or pause, or an I/O error
3190      * occurs during the write, then the write may return a short transfer count.
3191      * <p>
3192      * In static buffer mode, copies the data to the buffer starting at offset 0,
3193      * and the write mode is ignored.
3194      * Note that the actual playback of this data might occur after this function returns.
3195      *
3196      * @param audioData the array that holds the data to write.
3197      *     The implementation does not clip for sample values within the nominal range
3198      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
3199      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
3200      *     that could add energy, such as reverb.  For the convenience of applications
3201      *     that compute samples using filters with non-unity gain,
3202      *     sample values +3 dB beyond the nominal range are permitted.
3203      *     However such values may eventually be limited or clipped, depending on various gains
3204      *     and later processing in the audio path.  Therefore applications are encouraged
3205      *     to provide samples values within the nominal range.
3206      * @param offsetInFloats the offset, expressed as a number of floats,
3207      *     in audioData where the data to write starts.
3208      *    Must not be negative, or cause the data access to go out of bounds of the array.
3209      * @param sizeInFloats the number of floats to write in audioData after the offset.
3210      *    Must not be negative, or cause the data access to go out of bounds of the array.
3211      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3212      *     effect in static mode.
3213      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3214      *         to the audio sink.
3215      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3216      *     queuing as much audio data for playback as possible without blocking.
3217      * @return zero or the positive number of floats that were written, or one of the following
3218      *    error codes. The number of floats will be a multiple of the channel count not to
3219      *    exceed sizeInFloats.
3220      * <ul>
3221      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3222      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3223      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3224      *    needs to be recreated. The dead object error code is not returned if some data was
3225      *    successfully transferred. In this case, the error is returned at the next write()</li>
3226      * <li>{@link #ERROR} in case of other error</li>
3227      * </ul>
3228      */
write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)3229     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
3230             @WriteMode int writeMode) {
3231 
3232         if (mState == STATE_UNINITIALIZED) {
3233             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3234             return ERROR_INVALID_OPERATION;
3235         }
3236 
3237         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
3238             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
3239             return ERROR_INVALID_OPERATION;
3240         }
3241 
3242         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3243             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3244             return ERROR_BAD_VALUE;
3245         }
3246 
3247         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
3248                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
3249                 || (offsetInFloats + sizeInFloats > audioData.length)) {
3250             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
3251             return ERROR_BAD_VALUE;
3252         }
3253 
3254         if (!blockUntilOffloadDrain(writeMode)) {
3255             return 0;
3256         }
3257 
3258         final int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
3259                 writeMode == WRITE_BLOCKING);
3260 
3261         if ((mDataLoadMode == MODE_STATIC)
3262                 && (mState == STATE_NO_STATIC_DATA)
3263                 && (ret > 0)) {
3264             // benign race with respect to other APIs that read mState
3265             mState = STATE_INITIALIZED;
3266         }
3267 
3268         return ret;
3269     }
3270 
3271 
3272     /**
3273      * Writes the audio data to the audio sink for playback (streaming mode),
3274      * or copies audio data for later playback (static buffer mode).
3275      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
3276      * <p>
3277      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3278      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3279      * for playback, and will return a full transfer count.  However, if the write mode is
3280      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3281      * interrupts the write by calling stop or pause, or an I/O error
3282      * occurs during the write, then the write may return a short transfer count.
3283      * <p>
3284      * In static buffer mode, copies the data to the buffer starting at offset 0,
3285      * and the write mode is ignored.
3286      * Note that the actual playback of this data might occur after this function returns.
3287      *
3288      * @param audioData the buffer that holds the data to write, starting at the position reported
3289      *     by <code>audioData.position()</code>.
3290      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
3291      *     have been advanced to reflect the amount of data that was successfully written to
3292      *     the AudioTrack.
3293      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
3294      *     that the number of bytes requested be a multiple of the frame size (sample size in
3295      *     bytes multiplied by the channel count).
3296      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
3297      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3298      *     effect in static mode.
3299      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3300      *         to the audio sink.
3301      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3302      *     queuing as much audio data for playback as possible without blocking.
3303      * @return zero or the positive number of bytes that were written, or one of the following
3304      *    error codes.
3305      * <ul>
3306      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3307      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3308      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3309      *    needs to be recreated. The dead object error code is not returned if some data was
3310      *    successfully transferred. In this case, the error is returned at the next write()</li>
3311      * <li>{@link #ERROR} in case of other error</li>
3312      * </ul>
3313      */
write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)3314     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
3315             @WriteMode int writeMode) {
3316 
3317         if (mState == STATE_UNINITIALIZED) {
3318             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3319             return ERROR_INVALID_OPERATION;
3320         }
3321 
3322         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3323             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3324             return ERROR_BAD_VALUE;
3325         }
3326 
3327         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
3328             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
3329             return ERROR_BAD_VALUE;
3330         }
3331 
3332         if (!blockUntilOffloadDrain(writeMode)) {
3333             return 0;
3334         }
3335 
3336         int ret = 0;
3337         if (audioData.isDirect()) {
3338             ret = native_write_native_bytes(audioData,
3339                     audioData.position(), sizeInBytes, mAudioFormat,
3340                     writeMode == WRITE_BLOCKING);
3341         } else {
3342             ret = native_write_byte(NioUtils.unsafeArray(audioData),
3343                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
3344                     sizeInBytes, mAudioFormat,
3345                     writeMode == WRITE_BLOCKING);
3346         }
3347 
3348         if ((mDataLoadMode == MODE_STATIC)
3349                 && (mState == STATE_NO_STATIC_DATA)
3350                 && (ret > 0)) {
3351             // benign race with respect to other APIs that read mState
3352             mState = STATE_INITIALIZED;
3353         }
3354 
3355         if (ret > 0) {
3356             audioData.position(audioData.position() + ret);
3357         }
3358 
3359         return ret;
3360     }
3361 
3362     /**
3363      * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
3364      * The blocking behavior will depend on the write mode.
3365      * @param audioData the buffer that holds the data to write, starting at the position reported
3366      *     by <code>audioData.position()</code>.
3367      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
3368      *     have been advanced to reflect the amount of data that was successfully written to
3369      *     the AudioTrack.
3370      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
3371      *     that the number of bytes requested be a multiple of the frame size (sample size in
3372      *     bytes multiplied by the channel count).
3373      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
3374      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
3375      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3376      *         to the audio sink.
3377      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3378      *     queuing as much audio data for playback as possible without blocking.
3379      * @param timestamp The timestamp, in nanoseconds, of the first decodable audio frame in the
3380      *     provided audioData.
3381      * @return zero or the positive number of bytes that were written, or one of the following
3382      *    error codes.
3383      * <ul>
3384      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3385      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3386      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3387      *    needs to be recreated. The dead object error code is not returned if some data was
3388      *    successfully transferred. In this case, the error is returned at the next write()</li>
3389      * <li>{@link #ERROR} in case of other error</li>
3390      * </ul>
3391      */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes only make sense for continuously streamed data.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Without the HW A/V sync flag the timestamp cannot be conveyed;
        // fall back to the plain ByteBuffer write and drop the pts.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // A draining offloaded track accepts no data; non-blocking callers get 0.
        if (!blockUntilOffloadDrain(writeMode)) {
            return 0;
        }

        // create timestamp header if none exists
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            // Leading four-byte marker 0x55550002 — presumably a magic/version tag
            // understood by the native AV sync path; TODO confirm against the HAL.
            mAvSyncHeader.putInt(0x55550002);
        }

        // Start of a new access unit: record its payload size, presentation
        // timestamp, and header length, then rewind so the header gets written out.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // Header write failed: reset the sync state so the next call
                // rebuilds the header from scratch.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Header only partially consumed (possible in non-blocking mode);
                // report zero payload bytes written and retry on the next call.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write past the end of the current access unit.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            // Payload write failed: drop the in-flight access unit entirely.
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }
3469 
3470 
3471     /**
3472      * Sets the playback head position within the static buffer to zero,
3473      * that is it rewinds to start of static buffer.
3474      * The track must be stopped or paused, and
3475      * the track's creation mode must be {@link #MODE_STATIC}.
3476      * <p>
3477      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
3478      * {@link #getPlaybackHeadPosition()} to zero.
3479      * For earlier API levels, the reset behavior is unspecified.
3480      * <p>
3481      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
3482      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
3483      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
3484      *  {@link #ERROR_INVALID_OPERATION}
3485      */
reloadStaticData()3486     public int reloadStaticData() {
3487         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
3488             return ERROR_INVALID_OPERATION;
3489         }
3490         return native_reload_static();
3491     }
3492 
    /**
     * When an AudioTrack in offload mode is in STOPPING play state, wait until event STREAM_END is
     * received if blocking write or return with 0 frames written if non blocking mode.
     *
     * @param writeMode {@link #WRITE_BLOCKING} or {@link #WRITE_NON_BLOCKING}
     * @return true when the caller may proceed to write (the track is not draining),
     *     false when a non-blocking caller should bail out and report 0 frames written.
     */
    private boolean blockUntilOffloadDrain(int writeMode) {
        synchronized (mPlayStateLock) {
            // while-loop guards against spurious wakeups: the play state is
            // re-examined every time the lock is re-acquired after wait().
            while (mPlayState == PLAYSTATE_STOPPING || mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                if (writeMode == WRITE_NON_BLOCKING) {
                    return false;
                }
                try {
                    mPlayStateLock.wait();
                } catch (InterruptedException e) {
                    // Intentionally ignored: keep waiting until the drain completes.
                    // NOTE(review): the thread's interrupt status is not re-asserted
                    // here — re-interrupting would make the next wait() throw
                    // immediately and spin; confirm this matches intended behavior.
                }
            }
            return true;
        }
    }
3511 
3512     //--------------------------------------------------------------------------
3513     // Audio effects management
3514     //--------------------
3515 
3516     /**
3517      * Attaches an auxiliary effect to the audio track. A typical auxiliary
3518      * effect is a reverberation effect which can be applied on any sound source
3519      * that directs a certain amount of its energy to this effect. This amount
3520      * is defined by setAuxEffectSendLevel().
3521      * {@see #setAuxEffectSendLevel(float)}.
3522      * <p>After creating an auxiliary effect (e.g.
3523      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
3524      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
3525      * this method to attach the audio track to the effect.
3526      * <p>To detach the effect from the audio track, call this method with a
3527      * null effect id.
3528      *
3529      * @param effectId system wide unique id of the effect to attach
3530      * @return error code or success, see {@link #SUCCESS},
3531      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
3532      */
attachAuxEffect(int effectId)3533     public int attachAuxEffect(int effectId) {
3534         if (mState == STATE_UNINITIALIZED) {
3535             return ERROR_INVALID_OPERATION;
3536         }
3537         return native_attachAuxEffect(effectId);
3538     }
3539 
3540     /**
3541      * Sets the send level of the audio track to the attached auxiliary effect
3542      * {@link #attachAuxEffect(int)}.  Effect levels
3543      * are clamped to the closed interval [0.0, max] where
3544      * max is the value of {@link #getMaxVolume}.
3545      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
3546      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
3547      * this method must be called for the effect to be applied.
3548      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
3549      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
3550      * so an appropriate conversion from linear UI input x to level is:
3551      * x == 0 -&gt; level = 0
3552      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
3553      *
3554      * @param level linear send level
3555      * @return error code or success, see {@link #SUCCESS},
3556      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
3557      */
setAuxEffectSendLevel(@loatRangefrom = 0.0) float level)3558     public int setAuxEffectSendLevel(@FloatRange(from = 0.0) float level) {
3559         if (mState == STATE_UNINITIALIZED) {
3560             return ERROR_INVALID_OPERATION;
3561         }
3562         return baseSetAuxEffectSendLevel(level);
3563     }
3564 
3565     @Override
playerSetAuxEffectSendLevel(boolean muting, float level)3566     int playerSetAuxEffectSendLevel(boolean muting, float level) {
3567         level = clampGainOrLevel(muting ? 0.0f : level);
3568         int err = native_setAuxEffectSendLevel(level);
3569         return err == 0 ? SUCCESS : ERROR;
3570     }
3571 
3572     //--------------------------------------------------------------------------
3573     // Explicit Routing
3574     //--------------------
    // Device explicitly requested via setPreferredDevice(); accessed under
    // synchronized (this). Null means default routing.
    private AudioDeviceInfo mPreferredDevice = null;
3576 
3577     /**
3578      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
3579      * the output from this AudioTrack.
3580      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
3581      *  If deviceInfo is null, default routing is restored.
3582      * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
3583      * does not correspond to a valid audio output device.
3584      */
3585     @Override
setPreferredDevice(AudioDeviceInfo deviceInfo)3586     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
3587         // Do some validation....
3588         if (deviceInfo != null && !deviceInfo.isSink()) {
3589             return false;
3590         }
3591         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
3592         boolean status = native_setOutputDevice(preferredDeviceId);
3593         if (status == true) {
3594             synchronized (this) {
3595                 mPreferredDevice = deviceInfo;
3596             }
3597         }
3598         return status;
3599     }
3600 
3601     /**
3602      * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
3603      * is not guaranteed to correspond to the actual device being used for playback.
3604      */
3605     @Override
getPreferredDevice()3606     public AudioDeviceInfo getPreferredDevice() {
3607         synchronized (this) {
3608             return mPreferredDevice;
3609         }
3610     }
3611 
3612     /**
3613      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
3614      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
3615      * <code>getRoutedDevice()</code> will return null.
3616      */
3617     @Override
getRoutedDevice()3618     public AudioDeviceInfo getRoutedDevice() {
3619         int deviceId = native_getRoutedDeviceId();
3620         if (deviceId == 0) {
3621             return null;
3622         }
3623         return AudioManager.getDeviceForPortId(deviceId, AudioManager.GET_DEVICES_OUTPUTS);
3624     }
3625 
tryToDisableNativeRoutingCallback()3626     private void tryToDisableNativeRoutingCallback() {
3627         synchronized (mRoutingChangeListeners) {
3628             if (mEnableSelfRoutingMonitor) {
3629                 mEnableSelfRoutingMonitor = false;
3630                 testDisableNativeRoutingCallbacksLocked();
3631             }
3632         }
3633     }
3634 
3635     /**
3636      * Call BEFORE adding a routing callback handler and when enabling self routing listener
3637      * @return returns true for success, false otherwise.
3638      */
3639     @GuardedBy("mRoutingChangeListeners")
testEnableNativeRoutingCallbacksLocked()3640     private boolean testEnableNativeRoutingCallbacksLocked() {
3641         if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
3642             try {
3643                 native_enableDeviceCallback();
3644                 return true;
3645             } catch (IllegalStateException e) {
3646                 if (Log.isLoggable(TAG, Log.DEBUG)) {
3647                     Log.d(TAG, "testEnableNativeRoutingCallbacks failed", e);
3648                 }
3649             }
3650         }
3651         return false;
3652     }
3653 
3654     /*
3655      * Call AFTER removing a routing callback handler and when disabling self routing listener.
3656      */
3657     @GuardedBy("mRoutingChangeListeners")
testDisableNativeRoutingCallbacksLocked()3658     private void testDisableNativeRoutingCallbacksLocked() {
3659         if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
3660             try {
3661                 native_disableDeviceCallback();
3662             } catch (IllegalStateException e) {
3663                 // Fail silently as track state could have changed in between stop
3664                 // and disabling routing callback
3665             }
3666         }
3667     }
3668 
3669     //--------------------------------------------------------------------------
3670     // (Re)Routing Info
3671     //--------------------
    /**
     * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
     * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)}
     * by an app to receive (re)routing notifications.
     */
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();

    // Set when this track itself enabled the native device callback (see
    // testEnableNativeRoutingCallbacksLocked()); cleared by
    // tryToDisableNativeRoutingCallback().
    @GuardedBy("mRoutingChangeListeners")
    private boolean mEnableSelfRoutingMonitor;
3683 
3684    /**
3685     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
3686     * changes on this AudioTrack.
3687     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
3688     * notifications of rerouting events.
3689     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3690     * the callback. If <code>null</code>, the {@link Handler} associated with the main
3691     * {@link Looper} will be used.
3692     */
3693     @Override
addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)3694     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
3695             Handler handler) {
3696         synchronized (mRoutingChangeListeners) {
3697             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
3698                 mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
3699                 mRoutingChangeListeners.put(
3700                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
3701                                 handler != null ? handler : new Handler(mInitializationLooper)));
3702             }
3703         }
3704     }
3705 
3706     /**
3707      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
3708      * to receive rerouting notifications.
3709      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
3710      * to remove.
3711      */
3712     @Override
removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)3713     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
3714         synchronized (mRoutingChangeListeners) {
3715             if (mRoutingChangeListeners.containsKey(listener)) {
3716                 mRoutingChangeListeners.remove(listener);
3717             }
3718             testDisableNativeRoutingCallbacksLocked();
3719         }
3720     }
3721 
3722     //--------------------------------------------------------------------------
3723     // (Re)Routing Info
3724     //--------------------
3725     /**
3726      * Defines the interface by which applications can receive notifications of
3727      * routing changes for the associated {@link AudioTrack}.
3728      *
3729      * @deprecated users should switch to the general purpose
3730      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
3731      */
3732     @Deprecated
3733     public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
3734         /**
3735          * Called when the routing of an AudioTrack changes from either and
3736          * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
3737          * retrieve the newly routed-to device.
3738          */
onRoutingChanged(AudioTrack audioTrack)3739         public void onRoutingChanged(AudioTrack audioTrack);
3740 
3741         @Override
onRoutingChanged(AudioRouting router)3742         default public void onRoutingChanged(AudioRouting router) {
3743             if (router instanceof AudioTrack) {
3744                 onRoutingChanged((AudioTrack) router);
3745             }
3746         }
3747     }
3748 
3749     /**
3750      * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
3751      * on this AudioTrack.
3752      * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
3753      * of rerouting events.
3754      * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3755      * the callback. If <code>null</code>, the {@link Handler} associated with the main
3756      * {@link Looper} will be used.
3757      * @deprecated users should switch to the general purpose
3758      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
3759      */
3760     @Deprecated
addOnRoutingChangedListener(OnRoutingChangedListener listener, android.os.Handler handler)3761     public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
3762             android.os.Handler handler) {
3763         addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
3764     }
3765 
3766     /**
3767      * Removes an {@link OnRoutingChangedListener} which has been previously added
3768      * to receive rerouting notifications.
3769      * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
3770      * @deprecated users should switch to the general purpose
3771      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
3772      */
3773     @Deprecated
removeOnRoutingChangedListener(OnRoutingChangedListener listener)3774     public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
3775         removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
3776     }
3777 
3778     /**
3779      * Sends device list change notification to all listeners.
3780      */
broadcastRoutingChange()3781     private void broadcastRoutingChange() {
3782         AudioManager.resetAudioPortGeneration();
3783         baseUpdateDeviceId(getRoutedDevice());
3784         synchronized (mRoutingChangeListeners) {
3785             for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
3786                 delegate.notifyClient();
3787             }
3788         }
3789     }
3790 
3791     //--------------------------------------------------------------------------
3792     // Codec notifications
3793     //--------------------
3794 
3795     // OnCodecFormatChangedListener notifications uses an instance
3796     // of ListenerList to manage its listeners.
3797 
3798     private final Utils.ListenerList<AudioMetadataReadMap> mCodecFormatChangedListeners =
3799             new Utils.ListenerList();
3800 
3801     /**
3802      * Interface definition for a listener for codec format changes.
3803      */
3804     public interface OnCodecFormatChangedListener {
3805         /**
3806          * Called when the compressed codec format changes.
3807          *
3808          * @param audioTrack is the {@code AudioTrack} instance associated with the codec.
3809          * @param info is a {@link AudioMetadataReadMap} of values which contains decoded format
3810          *     changes reported by the codec.  Not all hardware
3811          *     codecs indicate codec format changes. Acceptable keys are taken from
3812          *     {@code AudioMetadata.Format.KEY_*} range, with the associated value type.
3813          */
onCodecFormatChanged( @onNull AudioTrack audioTrack, @Nullable AudioMetadataReadMap info)3814         void onCodecFormatChanged(
3815                 @NonNull AudioTrack audioTrack, @Nullable AudioMetadataReadMap info);
3816     }
3817 
3818     /**
3819      * Adds an {@link OnCodecFormatChangedListener} to receive notifications of
3820      * codec format change events on this {@code AudioTrack}.
3821      *
3822      * @param executor  Specifies the {@link Executor} object to control execution.
3823      *
3824      * @param listener The {@link OnCodecFormatChangedListener} interface to receive
3825      *     notifications of codec events.
3826      */
addOnCodecFormatChangedListener( @onNull @allbackExecutor Executor executor, @NonNull OnCodecFormatChangedListener listener)3827     public void addOnCodecFormatChangedListener(
3828             @NonNull @CallbackExecutor Executor executor,
3829             @NonNull OnCodecFormatChangedListener listener) { // NPE checks done by ListenerList.
3830         mCodecFormatChangedListeners.add(
3831                 listener, /* key for removal */
3832                 executor,
3833                 (int eventCode, AudioMetadataReadMap readMap) -> {
3834                     // eventCode is unused by this implementation.
3835                     listener.onCodecFormatChanged(this, readMap);
3836                 }
3837         );
3838     }
3839 
3840     /**
3841      * Removes an {@link OnCodecFormatChangedListener} which has been previously added
3842      * to receive codec format change events.
3843      *
3844      * @param listener The previously added {@link OnCodecFormatChangedListener} interface
3845      * to remove.
3846      */
removeOnCodecFormatChangedListener( @onNull OnCodecFormatChangedListener listener)3847     public void removeOnCodecFormatChangedListener(
3848             @NonNull OnCodecFormatChangedListener listener) {
3849         mCodecFormatChangedListeners.remove(listener);  // NPE checks done by ListenerList.
3850     }
3851 
3852     //---------------------------------------------------------
3853     // Interface definitions
3854     //--------------------
3855     /**
3856      * Interface definition for a callback to be invoked when the playback head position of
3857      * an AudioTrack has reached a notification marker or has increased by a certain period.
3858      */
3859     public interface OnPlaybackPositionUpdateListener  {
3860         /**
3861          * Called on the listener to notify it that the previously set marker has been reached
3862          * by the playback head.
3863          */
onMarkerReached(AudioTrack track)3864         void onMarkerReached(AudioTrack track);
3865 
3866         /**
3867          * Called on the listener to periodically notify it that the playback head has reached
3868          * a multiple of the notification period.
3869          */
onPeriodicNotification(AudioTrack track)3870         void onPeriodicNotification(AudioTrack track);
3871     }
3872 
3873     /**
3874      * Abstract class to receive event notifications about the stream playback in offloaded mode.
3875      * See {@link AudioTrack#registerStreamEventCallback(Executor, StreamEventCallback)} to register
3876      * the callback on the given {@link AudioTrack} instance.
3877      */
3878     public abstract static class StreamEventCallback {
3879         /**
3880          * Called when an offloaded track is no longer valid and has been discarded by the system.
3881          * An example of this happening is when an offloaded track has been paused too long, and
3882          * gets invalidated by the system to prevent any other offload.
3883          * @param track the {@link AudioTrack} on which the event happened.
3884          */
onTearDown(@onNull AudioTrack track)3885         public void onTearDown(@NonNull AudioTrack track) { }
3886         /**
3887          * Called when all the buffers of an offloaded track that were queued in the audio system
3888          * (e.g. the combination of the Android audio framework and the device's audio hardware)
3889          * have been played after {@link AudioTrack#stop()} has been called.
3890          * @param track the {@link AudioTrack} on which the event happened.
3891          */
onPresentationEnded(@onNull AudioTrack track)3892         public void onPresentationEnded(@NonNull AudioTrack track) { }
3893         /**
3894          * Called when more audio data can be written without blocking on an offloaded track.
3895          * @param track the {@link AudioTrack} on which the event happened.
3896          * @param sizeInFrames the number of frames available to write without blocking.
3897          *   Note that the frame size of a compressed stream is 1 byte.
3898          */
onDataRequest(@onNull AudioTrack track, @IntRange(from = 0) int sizeInFrames)3899         public void onDataRequest(@NonNull AudioTrack track, @IntRange(from = 0) int sizeInFrames) {
3900         }
3901     }
3902 
3903     /**
3904      * Registers a callback for the notification of stream events.
3905      * This callback can only be registered for instances operating in offloaded mode
3906      * (see {@link AudioTrack.Builder#setOffloadedPlayback(boolean)} and
3907      * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} for
3908      * more details).
3909      * @param executor {@link Executor} to handle the callbacks.
3910      * @param eventCallback the callback to receive the stream event notifications.
3911      */
registerStreamEventCallback(@onNull @allbackExecutor Executor executor, @NonNull StreamEventCallback eventCallback)3912     public void registerStreamEventCallback(@NonNull @CallbackExecutor Executor executor,
3913             @NonNull StreamEventCallback eventCallback) {
3914         if (eventCallback == null) {
3915             throw new IllegalArgumentException("Illegal null StreamEventCallback");
3916         }
3917         if (!mOffloaded) {
3918             throw new IllegalStateException(
3919                     "Cannot register StreamEventCallback on non-offloaded AudioTrack");
3920         }
3921         if (executor == null) {
3922             throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback");
3923         }
3924         synchronized (mStreamEventCbLock) {
3925             // check if eventCallback already in list
3926             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
3927                 if (seci.mStreamEventCb == eventCallback) {
3928                     throw new IllegalArgumentException(
3929                             "StreamEventCallback already registered");
3930                 }
3931             }
3932             beginStreamEventHandling();
3933             mStreamEventCbInfoList.add(new StreamEventCbInfo(executor, eventCallback));
3934         }
3935     }
3936 
3937     /**
3938      * Unregisters the callback for notification of stream events, previously registered
3939      * with {@link #registerStreamEventCallback(Executor, StreamEventCallback)}.
3940      * @param eventCallback the callback to unregister.
3941      */
unregisterStreamEventCallback(@onNull StreamEventCallback eventCallback)3942     public void unregisterStreamEventCallback(@NonNull StreamEventCallback eventCallback) {
3943         if (eventCallback == null) {
3944             throw new IllegalArgumentException("Illegal null StreamEventCallback");
3945         }
3946         if (!mOffloaded) {
3947             throw new IllegalStateException("No StreamEventCallback on non-offloaded AudioTrack");
3948         }
3949         synchronized (mStreamEventCbLock) {
3950             StreamEventCbInfo seciToRemove = null;
3951             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
3952                 if (seci.mStreamEventCb == eventCallback) {
3953                     // ok to remove while iterating over list as we exit iteration
3954                     mStreamEventCbInfoList.remove(seci);
3955                     if (mStreamEventCbInfoList.size() == 0) {
3956                         endStreamEventHandling();
3957                     }
3958                     return;
3959                 }
3960             }
3961             throw new IllegalArgumentException("StreamEventCallback was not registered");
3962         }
3963     }
3964 
3965     //---------------------------------------------------------
3966     // Offload
3967     //--------------------
3968     private static class StreamEventCbInfo {
3969         final Executor mStreamEventExec;
3970         final StreamEventCallback mStreamEventCb;
3971 
StreamEventCbInfo(Executor e, StreamEventCallback cb)3972         StreamEventCbInfo(Executor e, StreamEventCallback cb) {
3973             mStreamEventExec = e;
3974             mStreamEventCb = cb;
3975         }
3976     }
3977 
    // Guards mStreamEventCbInfoList and the begin/end of stream event handling.
    private final Object mStreamEventCbLock = new Object();
    @GuardedBy("mStreamEventCbLock")
    @NonNull private LinkedList<StreamEventCbInfo> mStreamEventCbInfoList =
            new LinkedList<StreamEventCbInfo>();
    /**
     * Dedicated thread for handling the StreamEvent callbacks
     */
    private @Nullable HandlerThread mStreamEventHandlerThread;
    // volatile: read without the lock from the native callback path
    // (see handleStreamEventFromNative()).
    private @Nullable volatile StreamEventHandler mStreamEventHandler;
3987 
3988     /**
3989      * Called from native AudioTrack callback thread, filter messages if necessary
3990      * and repost event on AudioTrack message loop to prevent blocking native thread.
3991      * @param what event code received from native
3992      * @param arg optional argument for event
3993      */
handleStreamEventFromNative(int what, int arg)3994     void handleStreamEventFromNative(int what, int arg) {
3995         if (mStreamEventHandler == null) {
3996             return;
3997         }
3998         switch (what) {
3999             case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
4000                 // replace previous CAN_WRITE_MORE_DATA messages with the latest value
4001                 mStreamEventHandler.removeMessages(NATIVE_EVENT_CAN_WRITE_MORE_DATA);
4002                 mStreamEventHandler.sendMessage(
4003                         mStreamEventHandler.obtainMessage(
4004                                 NATIVE_EVENT_CAN_WRITE_MORE_DATA, arg, 0/*ignored*/));
4005                 break;
4006             case NATIVE_EVENT_NEW_IAUDIOTRACK:
4007                 mStreamEventHandler.sendMessage(
4008                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_NEW_IAUDIOTRACK));
4009                 break;
4010             case NATIVE_EVENT_STREAM_END:
4011                 mStreamEventHandler.sendMessage(
4012                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_STREAM_END));
4013                 break;
4014         }
4015     }
4016 
    /**
     * Handler running on the dedicated stream-event thread. Updates the play state on
     * STREAM_END, then dispatches the event to every registered callback on that
     * callback's own executor.
     */
    private class StreamEventHandler extends Handler {

        StreamEventHandler(Looper looper) {
            super(looper);
        }

        @Override
        public void handleMessage(Message msg) {
            final LinkedList<StreamEventCbInfo> cbInfoList;
            synchronized (mStreamEventCbLock) {
                if (msg.what == NATIVE_EVENT_STREAM_END) {
                    synchronized (mPlayStateLock) {
                        if (mPlayState == PLAYSTATE_STOPPING) {
                            if (mOffloadEosPending) {
                                // play() was called again before EOS: restart the track.
                                native_start();
                                mPlayState = PLAYSTATE_PLAYING;
                            } else {
                                mAvSyncHeader = null;
                                mAvSyncBytesRemaining = 0;
                                mPlayState = PLAYSTATE_STOPPED;
                            }
                            mOffloadEosPending = false;
                            // Wake any thread blocked waiting for the stop to complete.
                            mPlayStateLock.notify();
                        }
                    }
                }
                if (mStreamEventCbInfoList.size() == 0) {
                    return;
                }
                // Copy the list so callbacks run outside the lock.
                cbInfoList = new LinkedList<StreamEventCbInfo>(mStreamEventCbInfoList);
            }

            // Dispatch without the caller's Binder identity.
            final long identity = Binder.clearCallingIdentity();
            try {
                for (StreamEventCbInfo cbi : cbInfoList) {
                    switch (msg.what) {
                        case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onDataRequest(AudioTrack.this, msg.arg1));
                            break;
                        case NATIVE_EVENT_NEW_IAUDIOTRACK:
                            // TODO also release track as it's no longer usable
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onTearDown(AudioTrack.this));
                            break;
                        case NATIVE_EVENT_STREAM_END:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onPresentationEnded(AudioTrack.this));
                            break;
                    }
                }
            } finally {
                Binder.restoreCallingIdentity(identity);
            }
        }
    }
4073 
4074     @GuardedBy("mStreamEventCbLock")
beginStreamEventHandling()4075     private void beginStreamEventHandling() {
4076         if (mStreamEventHandlerThread == null) {
4077             mStreamEventHandlerThread = new HandlerThread(TAG + ".StreamEvent");
4078             mStreamEventHandlerThread.start();
4079             final Looper looper = mStreamEventHandlerThread.getLooper();
4080             if (looper != null) {
4081                 mStreamEventHandler = new StreamEventHandler(looper);
4082             }
4083         }
4084     }
4085 
4086     @GuardedBy("mStreamEventCbLock")
endStreamEventHandling()4087     private void endStreamEventHandling() {
4088         if (mStreamEventHandlerThread != null) {
4089             mStreamEventHandlerThread.quit();
4090             mStreamEventHandlerThread = null;
4091         }
4092     }
4093 
4094     /**
4095      * Sets a {@link LogSessionId} instance to this AudioTrack for metrics collection.
4096      *
4097      * @param logSessionId a {@link LogSessionId} instance which is used to
4098      *        identify this object to the metrics service. Proper generated
4099      *        Ids must be obtained from the Java metrics service and should
4100      *        be considered opaque. Use
4101      *        {@link LogSessionId#LOG_SESSION_ID_NONE} to remove the
4102      *        logSessionId association.
4103      * @throws IllegalStateException if AudioTrack not initialized.
4104      *
4105      */
setLogSessionId(@onNull LogSessionId logSessionId)4106     public void setLogSessionId(@NonNull LogSessionId logSessionId) {
4107         Objects.requireNonNull(logSessionId);
4108         if (mState == STATE_UNINITIALIZED) {
4109             throw new IllegalStateException("track not initialized");
4110         }
4111         String stringId = logSessionId.getStringId();
4112         native_setLogSessionId(stringId);
4113         mLogSessionId = logSessionId;
4114     }
4115 
4116     /**
4117      * Returns the {@link LogSessionId}.
4118      */
4119     @NonNull
getLogSessionId()4120     public LogSessionId getLogSessionId() {
4121         return mLogSessionId;
4122     }
4123 
4124     //---------------------------------------------------------
4125     // Inner classes
4126     //--------------------
4127     /**
4128      * Helper class to handle the forwarding of native events to the appropriate listener
4129      * (potentially) handled in a different thread
4130      */
4131     private class NativePositionEventHandlerDelegate {
4132         private final Handler mHandler;
4133 
NativePositionEventHandlerDelegate(final AudioTrack track, final OnPlaybackPositionUpdateListener listener, Handler handler)4134         NativePositionEventHandlerDelegate(final AudioTrack track,
4135                                    final OnPlaybackPositionUpdateListener listener,
4136                                    Handler handler) {
4137             // find the looper for our new event handler
4138             Looper looper;
4139             if (handler != null) {
4140                 looper = handler.getLooper();
4141             } else {
4142                 // no given handler, use the looper the AudioTrack was created in
4143                 looper = mInitializationLooper;
4144             }
4145 
4146             // construct the event handler with this looper
4147             if (looper != null) {
4148                 // implement the event handler delegate
4149                 mHandler = new Handler(looper) {
4150                     @Override
4151                     public void handleMessage(Message msg) {
4152                         if (track == null) {
4153                             return;
4154                         }
4155                         switch(msg.what) {
4156                         case NATIVE_EVENT_MARKER:
4157                             if (listener != null) {
4158                                 listener.onMarkerReached(track);
4159                             }
4160                             break;
4161                         case NATIVE_EVENT_NEW_POS:
4162                             if (listener != null) {
4163                                 listener.onPeriodicNotification(track);
4164                             }
4165                             break;
4166                         default:
4167                             loge("Unknown native event type: " + msg.what);
4168                             break;
4169                         }
4170                     }
4171                 };
4172             } else {
4173                 mHandler = null;
4174             }
4175         }
4176 
getHandler()4177         Handler getHandler() {
4178             return mHandler;
4179         }
4180     }
4181 
4182     //---------------------------------------------------------
4183     // Methods for IPlayer interface
4184     //--------------------
4185     @Override
playerStart()4186     void playerStart() {
4187         play();
4188     }
4189 
    /** IPlayer interface hook: delegates to {@link #pause()}. */
    @Override
    void playerPause() {
        pause();
    }
4194 
    /** IPlayer interface hook: delegates to {@link #stop()}. */
    @Override
    void playerStop() {
        stop();
    }
4199 
4200     //---------------------------------------------------------
4201     // Java methods called from the native side
4202     //--------------------
4203     @SuppressWarnings("unused")
4204     @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
postEventFromNative(Object audiotrack_ref, int what, int arg1, int arg2, Object obj)4205     private static void postEventFromNative(Object audiotrack_ref,
4206             int what, int arg1, int arg2, Object obj) {
4207         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
4208         final AudioTrack track = (AudioTrack) ((WeakReference) audiotrack_ref).get();
4209         if (track == null) {
4210             return;
4211         }
4212 
4213         if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
4214             track.broadcastRoutingChange();
4215             return;
4216         }
4217 
4218         if (what == NATIVE_EVENT_CODEC_FORMAT_CHANGE) {
4219             ByteBuffer buffer = (ByteBuffer) obj;
4220             buffer.order(ByteOrder.nativeOrder());
4221             buffer.rewind();
4222             AudioMetadataReadMap audioMetaData = AudioMetadata.fromByteBuffer(buffer);
4223             if (audioMetaData == null) {
4224                 Log.e(TAG, "Unable to get audio metadata from byte buffer");
4225                 return;
4226             }
4227             track.mCodecFormatChangedListeners.notify(0 /* eventCode, unused */, audioMetaData);
4228             return;
4229         }
4230 
4231         if (what == NATIVE_EVENT_CAN_WRITE_MORE_DATA
4232                 || what == NATIVE_EVENT_NEW_IAUDIOTRACK
4233                 || what == NATIVE_EVENT_STREAM_END) {
4234             track.handleStreamEventFromNative(what, arg1);
4235             return;
4236         }
4237 
4238         NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
4239         if (delegate != null) {
4240             Handler handler = delegate.getHandler();
4241             if (handler != null) {
4242                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
4243                 handler.sendMessage(m);
4244             }
4245         }
4246     }
4247 
4248     //---------------------------------------------------------
4249     // Native methods called from the Java side
4250     //--------------------
4251 
native_is_direct_output_supported(int encoding, int sampleRate, int channelMask, int channelIndexMask, int contentType, int usage, int flags)4252     private static native boolean native_is_direct_output_supported(int encoding, int sampleRate,
4253             int channelMask, int channelIndexMask, int contentType, int usage, int flags);
4254 
4255     // post-condition: mStreamType is overwritten with a value
4256     //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
4257     //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
native_setup(Object audiotrack_this, Object attributes, int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat, int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack, boolean offload, int encapsulationMode, Object tunerConfiguration, @NonNull String opPackageName)4258     private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
4259             Object /*AudioAttributes*/ attributes,
4260             int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
4261             int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack,
4262             boolean offload, int encapsulationMode, Object tunerConfiguration,
4263             @NonNull String opPackageName);
4264 
native_finalize()4265     private native final void native_finalize();
4266 
4267     /**
4268      * @hide
4269      */
4270     @UnsupportedAppUsage
native_release()4271     public native final void native_release();
4272 
native_start()4273     private native final void native_start();
4274 
native_stop()4275     private native final void native_stop();
4276 
native_pause()4277     private native final void native_pause();
4278 
native_flush()4279     private native final void native_flush();
4280 
native_write_byte(byte[] audioData, int offsetInBytes, int sizeInBytes, int format, boolean isBlocking)4281     private native final int native_write_byte(byte[] audioData,
4282                                                int offsetInBytes, int sizeInBytes, int format,
4283                                                boolean isBlocking);
4284 
native_write_short(short[] audioData, int offsetInShorts, int sizeInShorts, int format, boolean isBlocking)4285     private native final int native_write_short(short[] audioData,
4286                                                 int offsetInShorts, int sizeInShorts, int format,
4287                                                 boolean isBlocking);
4288 
native_write_float(float[] audioData, int offsetInFloats, int sizeInFloats, int format, boolean isBlocking)4289     private native final int native_write_float(float[] audioData,
4290                                                 int offsetInFloats, int sizeInFloats, int format,
4291                                                 boolean isBlocking);
4292 
native_write_native_bytes(ByteBuffer audioData, int positionInBytes, int sizeInBytes, int format, boolean blocking)4293     private native final int native_write_native_bytes(ByteBuffer audioData,
4294             int positionInBytes, int sizeInBytes, int format, boolean blocking);
4295 
native_reload_static()4296     private native final int native_reload_static();
4297 
native_get_buffer_size_frames()4298     private native final int native_get_buffer_size_frames();
native_set_buffer_size_frames(int bufferSizeInFrames)4299     private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
native_get_buffer_capacity_frames()4300     private native final int native_get_buffer_capacity_frames();
4301 
native_setVolume(float leftVolume, float rightVolume)4302     private native final void native_setVolume(float leftVolume, float rightVolume);
4303 
native_set_playback_rate(int sampleRateInHz)4304     private native final int native_set_playback_rate(int sampleRateInHz);
native_get_playback_rate()4305     private native final int native_get_playback_rate();
4306 
native_set_playback_params(@onNull PlaybackParams params)4307     private native final void native_set_playback_params(@NonNull PlaybackParams params);
native_get_playback_params()4308     private native final @NonNull PlaybackParams native_get_playback_params();
4309 
native_set_marker_pos(int marker)4310     private native final int native_set_marker_pos(int marker);
native_get_marker_pos()4311     private native final int native_get_marker_pos();
4312 
native_set_pos_update_period(int updatePeriod)4313     private native final int native_set_pos_update_period(int updatePeriod);
native_get_pos_update_period()4314     private native final int native_get_pos_update_period();
4315 
native_set_position(int position)4316     private native final int native_set_position(int position);
native_get_position()4317     private native final int native_get_position();
4318 
native_get_latency()4319     private native final int native_get_latency();
4320 
native_get_underrun_count()4321     private native final int native_get_underrun_count();
4322 
native_get_flags()4323     private native final int native_get_flags();
4324 
4325     // longArray must be a non-null array of length >= 2
4326     // [0] is assigned the frame position
4327     // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
native_get_timestamp(long[] longArray)4328     private native final int native_get_timestamp(long[] longArray);
4329 
native_set_loop(int start, int end, int loopCount)4330     private native final int native_set_loop(int start, int end, int loopCount);
4331 
native_get_output_sample_rate(int streamType)4332     static private native final int native_get_output_sample_rate(int streamType);
native_get_min_buff_size( int sampleRateInHz, int channelConfig, int audioFormat)4333     static private native final int native_get_min_buff_size(
4334             int sampleRateInHz, int channelConfig, int audioFormat);
4335 
native_attachAuxEffect(int effectId)4336     private native final int native_attachAuxEffect(int effectId);
native_setAuxEffectSendLevel(float level)4337     private native final int native_setAuxEffectSendLevel(float level);
4338 
native_setOutputDevice(int deviceId)4339     private native final boolean native_setOutputDevice(int deviceId);
native_getRoutedDeviceId()4340     private native final int native_getRoutedDeviceId();
native_enableDeviceCallback()4341     private native final void native_enableDeviceCallback();
native_disableDeviceCallback()4342     private native final void native_disableDeviceCallback();
4343 
native_applyVolumeShaper( @onNull VolumeShaper.Configuration configuration, @NonNull VolumeShaper.Operation operation)4344     private native int native_applyVolumeShaper(
4345             @NonNull VolumeShaper.Configuration configuration,
4346             @NonNull VolumeShaper.Operation operation);
4347 
native_getVolumeShaperState(int id)4348     private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
native_setPresentation(int presentationId, int programId)4349     private native final int native_setPresentation(int presentationId, int programId);
4350 
native_getPortId()4351     private native int native_getPortId();
4352 
native_set_delay_padding(int delayInFrames, int paddingInFrames)4353     private native void native_set_delay_padding(int delayInFrames, int paddingInFrames);
4354 
native_set_audio_description_mix_level_db(float level)4355     private native int native_set_audio_description_mix_level_db(float level);
native_get_audio_description_mix_level_db(float[] level)4356     private native int native_get_audio_description_mix_level_db(float[] level);
native_set_dual_mono_mode(int dualMonoMode)4357     private native int native_set_dual_mono_mode(int dualMonoMode);
native_get_dual_mono_mode(int[] dualMonoMode)4358     private native int native_get_dual_mono_mode(int[] dualMonoMode);
native_setLogSessionId(@ullable String logSessionId)4359     private native void native_setLogSessionId(@Nullable String logSessionId);
native_setStartThresholdInFrames(int startThresholdInFrames)4360     private native int native_setStartThresholdInFrames(int startThresholdInFrames);
native_getStartThresholdInFrames()4361     private native int native_getStartThresholdInFrames();
4362 
4363     /**
4364      * Sets the audio service Player Interface Id.
4365      *
4366      * The playerIId does not change over the lifetime of the client
4367      * Java AudioTrack and is set automatically on creation.
4368      *
4369      * This call informs the native AudioTrack for metrics logging purposes.
4370      *
4371      * @param id the value reported by AudioManager when registering the track.
4372      *           A value of -1 indicates invalid - the playerIId was never set.
4373      * @throws IllegalStateException if AudioTrack not initialized.
4374      */
native_setPlayerIId(int playerIId)4375     private native void native_setPlayerIId(int playerIId);
4376 
4377     //---------------------------------------------------------
4378     // Utility methods
4379     //------------------
4380 
logd(String msg)4381     private static void logd(String msg) {
4382         Log.d(TAG, msg);
4383     }
4384 
loge(String msg)4385     private static void loge(String msg) {
4386         Log.e(TAG, msg);
4387     }
4388 
    /**
     * Keys for the {@link PersistableBundle} returned by {@link AudioTrack#getMetrics},
     * identifying per-track metrics attributes. The string values are frozen public
     * API and must never change.
     */
    public final static class MetricsConstants
    {
        // Non-instantiable: pure constant holder.
        private MetricsConstants() {}

        // MM_PREFIX is slightly different than TAG, used to avoid cut-n-paste errors.
        private static final String MM_PREFIX = "android.media.audiotrack.";

        /**
         * Key to extract the stream type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * This value may not exist in API level {@link android.os.Build.VERSION_CODES#P}.
         * The value is a {@code String}.
         */
        public static final String STREAMTYPE = MM_PREFIX + "streamtype";

        /**
         * Key to extract the attribute content type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String CONTENTTYPE = MM_PREFIX + "type";

        /**
         * Key to extract the attribute usage for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String USAGE = MM_PREFIX + "usage";

        /**
         * Key to extract the sample rate for this track in Hz
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is an {@code int}.
         * @deprecated This does not work. Use {@link AudioTrack#getSampleRate()} instead.
         */
        // NOTE: key deliberately left with the historical (wrong) "audiorecord" prefix;
        // the value is frozen API and the constant is deprecated rather than corrected.
        @Deprecated
        public static final String SAMPLERATE = "android.media.audiorecord.samplerate";

        /**
         * Key to extract the native channel mask information for this track
         * from the {@link AudioTrack#getMetrics} return value.
         *
         * The value is a {@code long}.
         * @deprecated This does not work. Use {@link AudioTrack#getFormat()} and read from
         * the returned format instead.
         */
        // NOTE: same historical "audiorecord" prefix as SAMPLERATE above; frozen API.
        @Deprecated
        public static final String CHANNELMASK = "android.media.audiorecord.channelmask";

        /**
         * Use for testing only. Do not expose.
         * The current sample rate.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String SAMPLE_RATE = MM_PREFIX + "sampleRate";

        /**
         * Use for testing only. Do not expose.
         * The native channel mask.
         * The value is a {@code long}.
         * @hide
         */
        @TestApi
        public static final String CHANNEL_MASK = MM_PREFIX + "channelMask";

        /**
         * Use for testing only. Do not expose.
         * The output audio data encoding.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ENCODING = MM_PREFIX + "encoding";

        /**
         * Use for testing only. Do not expose.
         * The port id of this track port in audioserver.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String PORT_ID = MM_PREFIX + "portId";

        /**
         * Use for testing only. Do not expose.
         * The buffer frameCount.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String FRAME_COUNT = MM_PREFIX + "frameCount";

        /**
         * Use for testing only. Do not expose.
         * The actual track attributes used.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ATTRIBUTES = MM_PREFIX + "attributes";
    }
4492 }
4493