/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AudioStreamTrack"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>
#include <media/AudioTrack.h>

#include <aaudio/AAudio.h>
#include <system/audio.h>

#include "core/AudioGlobal.h"
#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamTrack.h"
#include "utility/AudioClock.h"
#include "utility/FixedBlockReader.h"

using namespace android;
using namespace aaudio;

using android::content::AttributionSourceState;

// Arbitrary and somewhat generous number of bursts.
#define DEFAULT_BURSTS_PER_BUFFER_CAPACITY     8

/*
 * Create a stream that uses the AudioTrack.
 */
AudioStreamTrack::AudioStreamTrack()
    : AudioStreamLegacy()
    , mFixedBlockReader(*this)
{
}

AudioStreamTrack::~AudioStreamTrack()
{
    const aaudio_stream_state_t state = getState();
    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
    ALOGE_IF(bad, "stream not closed, in state %d", state);
}

aaudio_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
{
    aaudio_result_t result = AAUDIO_OK;

    result = AudioStream::open(builder);
    if (result != AAUDIO_OK) {
        return result;
    }

    const aaudio_session_id_t requestedSessionId = builder.getSessionId();
    const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);

    audio_channel_mask_t channelMask =
            AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), false /*isInput*/);

    audio_output_flags_t flags;
    aaudio_performance_mode_t perfMode = getPerformanceMode();
    switch(perfMode) {
        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
            // Bypass the normal mixer and go straight to the FAST mixer.
            // If the app asks for a sessionId then it means they want to use effects.
            // So don't use RAW flag.
            flags = (audio_output_flags_t) ((requestedSessionId == AAUDIO_SESSION_ID_NONE)
                    ? (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW)
                    : (AUDIO_OUTPUT_FLAG_FAST));
            break;

        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
            // This uses a mixer that wakes up less often than the FAST mixer.
            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
            break;

        case AAUDIO_PERFORMANCE_MODE_NONE:
        default:
            // No flags. Use a normal mixer in front of the FAST mixer.
            flags = AUDIO_OUTPUT_FLAG_NONE;
            break;
    }

    size_t frameCount = (size_t)builder.getBufferCapacity();

    // To avoid glitching, let AudioFlinger pick the optimal burst size.
    int32_t notificationFrames = 0;

    const audio_format_t format = (getFormat() == AUDIO_FORMAT_DEFAULT)
            ? AUDIO_FORMAT_PCM_FLOAT
            : getFormat();

    // Setup the callback if there is one.
    AudioTrack::callback_t callback = nullptr;
    void *callbackData = nullptr;
    // Note that TRANSFER_SYNC does not allow FAST track
    AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
    if (builder.getDataCallbackProc() != nullptr) {
        streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
        callback = getLegacyCallback();
        callbackData = this;

        // If the total buffer size is unspecified then base the size on the burst size.
        if (frameCount == 0
                && ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0)) {
            // Take advantage of a special trick that allows us to create a buffer
            // that is some multiple of the burst size.
            notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
        }
    }
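    // Remember the app's requested callback size so that, if specified, the variable-sized
    // AudioTrack callbacks can be adapted to fixed-size blocks later in open().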
    mCallbackBufferSize = builder.getFramesPerDataCallback();

    ALOGD("open(), request notificationFrames = %d, frameCount = %u",
          notificationFrames, (uint)frameCount);

    // Don't call mAudioTrack->setDeviceId() because it will be overwritten by set()!
    audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
                                           ? AUDIO_PORT_HANDLE_NONE
                                           : getDeviceId();

    const audio_content_type_t contentType =
            AAudioConvert_contentTypeToInternal(builder.getContentType());
    const audio_usage_t usage =
            AAudioConvert_usageToInternal(builder.getUsage());
    const audio_flags_mask_t attributesFlags =
        AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy(),
                                                         builder.getSpatializationBehavior(),
                                                         builder.isContentSpatialized());

    const audio_attributes_t attributes = {
            .content_type = contentType,
            .usage = usage,
            .source = AUDIO_SOURCE_DEFAULT, // only used for recording
            .flags = attributesFlags,
            .tags = ""
    };

    mAudioTrack = new AudioTrack();
    // TODO b/182392769: use attribution source util
    mAudioTrack->set(
            AUDIO_STREAM_DEFAULT,  // ignored because we pass attributes below
            getSampleRate(),
            format,
            channelMask,
            frameCount,
            flags,
            callback,
            callbackData,
            notificationFrames,
            0,       // DEFAULT sharedBuffer
            false,   // DEFAULT threadCanCallJava
            sessionId,
            streamTransferType,
            NULL,    // DEFAULT audio_offload_info_t
            AttributionSourceState(), // DEFAULT uid and pid
            &attributes,
            // WARNING - If doNotReconnect is set to true then audio stops after plugging
            // and unplugging headphones a few times.
            false,   // DEFAULT doNotReconnect
            1.0f,    // DEFAULT maxRequiredSpeed
            selectedDeviceId
    );

    // Set it here so it can be logged by the destructor if the open failed.
    mAudioTrack->setCallerName(kCallerName);

    // Did we get a valid track?
    status_t status = mAudioTrack->initCheck();
    if (status != NO_ERROR) {
        safeReleaseClose();
        ALOGE("open(), initCheck() returned %d", status);
        return AAudioConvert_androidToAAudioResult(status);
    }

    mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK)
            + std::to_string(mAudioTrack->getPortId());
    android::mediametrics::LogItem(mMetricsId)
            .set(AMEDIAMETRICS_PROP_PERFORMANCEMODE,
                 AudioGlobal_convertPerformanceModeToText(builder.getPerformanceMode()))
            .set(AMEDIAMETRICS_PROP_SHARINGMODE,
                 AudioGlobal_convertSharingModeToText(builder.getSharingMode()))
            .set(AMEDIAMETRICS_PROP_ENCODINGCLIENT, toString(getFormat()).c_str()).record();

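    // Apply the current duck/mute volume now that the AudioTrack exists.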
    doSetVolume();

    // Get the actual values from the AudioTrack.
    setChannelMask(AAudioConvert_androidToAAudioChannelMask(
        mAudioTrack->channelMask(), false /*isInput*/,
        AAudio_isChannelIndexMask(getChannelMask())));
    setFormat(mAudioTrack->format());
    setDeviceFormat(mAudioTrack->format());
    setSampleRate(mAudioTrack->getSampleRate());
    setBufferCapacity(getBufferCapacityFromDevice());
    setFramesPerBurst(getFramesPerBurstFromDevice());

    // We may need to pass the data through a block size adapter to guarantee constant size.
    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
        // This may need to change if we add format conversion before
        // the block size adaptation.
        mBlockAdapterBytesPerFrame = getBytesPerFrame();
        int callbackSizeBytes = mBlockAdapterBytesPerFrame * mCallbackBufferSize;
        mFixedBlockReader.open(callbackSizeBytes);
        mBlockAdapter = &mFixedBlockReader;
    } else {
        mBlockAdapter = nullptr;
    }

    setState(AAUDIO_STREAM_STATE_OPEN);
    setDeviceId(mAudioTrack->getRoutedDeviceId());

    aaudio_session_id_t actualSessionId =
            (requestedSessionId == AAUDIO_SESSION_ID_NONE)
            ? AAUDIO_SESSION_ID_NONE
            : (aaudio_session_id_t) mAudioTrack->getSessionId();
    setSessionId(actualSessionId);

    mAudioTrack->addAudioDeviceCallback(this);

    // Update performance mode based on the actual stream flags.
    // For example, if the sample rate is not allowed then you won't get a FAST track.
    audio_output_flags_t actualFlags = mAudioTrack->getFlags();
    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
    // We may not get the RAW flag. But as long as we get the FAST flag we can call it LOW_LATENCY.
    if ((actualFlags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
    } else if ((actualFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
    }
    setPerformanceMode(actualPerformanceMode);

    setSharingMode(AAUDIO_SHARING_MODE_SHARED); // EXCLUSIVE mode not supported in legacy

    // Log if we did not get what we asked for.
    ALOGD_IF(actualFlags != flags,
             "open() flags changed from 0x%08X to 0x%08X",
             flags, actualFlags);
    ALOGD_IF(actualPerformanceMode != perfMode,
             "open() perfMode changed from %d to %d",
             perfMode, actualPerformanceMode);

    return AAUDIO_OK;
}

aaudio_result_t AudioStreamTrack::release_l() {
    if (getState() != AAUDIO_STREAM_STATE_CLOSING) {
        status_t err = mAudioTrack->removeAudioDeviceCallback(this);
        ALOGE_IF(err, "%s() removeAudioDeviceCallback returned %d", __func__, err);
        logReleaseBufferState();
        // Data callbacks may still be running!
        return AudioStream::release_l();
    } else {
        return AAUDIO_OK; // already released
    }
}

void AudioStreamTrack::close_l() {
    // The callbacks are normally joined in the AudioTrack destructor.
    // But if another object has a reference to the AudioTrack then
    // it will not get deleted here.
    // So we should join callbacks explicitly before returning.
    // Unlock around the join to avoid deadlocks if the callback tries to lock.
    // This can happen if the callback returns AAUDIO_CALLBACK_RESULT_STOP
    mStreamLock.unlock();
    mAudioTrack->stopAndJoinCallbacks();
    mStreamLock.lock();
    mAudioTrack.clear();
    // Do not close mFixedBlockReader. It has a unique_ptr to its buffer
    // so it will clean up by itself.
    AudioStream::close_l();
}

void AudioStreamTrack::processCallback(int event, void *info) {

    switch (event) {
        case AudioTrack::EVENT_MORE_DATA:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
            break;

        // Stream got rerouted so we disconnect.
        case AudioTrack::EVENT_NEW_IAUDIOTRACK:
            // request stream disconnect if the restored AudioTrack has properties not matching
            // what was requested initially
            if (mAudioTrack->channelCount() != getSamplesPerFrame()
                    || mAudioTrack->format() != getFormat()
                    || mAudioTrack->getSampleRate() != getSampleRate()
                    || mAudioTrack->getRoutedDeviceId() != getDeviceId()
                    || getBufferCapacityFromDevice() != getBufferCapacity()
                    || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
                processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
            }
            break;

        default:
            break;
    }
    return;
}

aaudio_result_t AudioStreamTrack::requestStart_l() {
    if (mAudioTrack.get() == nullptr) {
        ALOGE("requestStart() no AudioTrack");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    // Get current position so we can detect when the track is playing.
    status_t err = mAudioTrack->getPosition(&mPositionWhenStarting);
    if (err != OK) {
        return AAudioConvert_androidToAAudioResult(err);
    }

    // Enable callback before starting AudioTrack to avoid shutting
    // down because of a race condition.
    mCallbackEnabled.store(true);
    aaudio_stream_state_t originalState = getState();
    // Set before starting the callback so that we are in the correct state
    // before updateStateMachine() can be called by the callback.
    setState(AAUDIO_STREAM_STATE_STARTING);
    err = mAudioTrack->start();
    if (err != OK) {
        mCallbackEnabled.store(false);
        setState(originalState);
        return AAudioConvert_androidToAAudioResult(err);
    }
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamTrack::requestPause_l() {
    if (mAudioTrack.get() == nullptr) {
        ALOGE("%s() no AudioTrack", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_PAUSING);
    mAudioTrack->pause();
    mCallbackEnabled.store(false);
    status_t err = mAudioTrack->getPosition(&mPositionWhenPausing);
    if (err != OK) {
        return AAudioConvert_androidToAAudioResult(err);
    }
    return checkForDisconnectRequest(false);
}

aaudio_result_t AudioStreamTrack::requestFlush_l() {
    if (mAudioTrack.get() == nullptr) {
        ALOGE("%s() no AudioTrack", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
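    // A flush discards data that was written but not yet played, so advance
    // the read counter to match the write counter before resetting.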
    incrementFramesRead(getFramesWritten() - getFramesRead());
    mAudioTrack->flush();
    mFramesRead.reset32(); // service reads frames, service position reset on flush
    mTimestampPosition.reset32();
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamTrack::requestStop_l() {
    if (mAudioTrack.get() == nullptr) {
        ALOGE("%s() no AudioTrack", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_STOPPING);
    mFramesRead.catchUpTo(getFramesWritten());
    mTimestampPosition.catchUpTo(getFramesWritten());
    mFramesRead.reset32(); // service reads frames, service position reset on stop
    mTimestampPosition.reset32();
    mAudioTrack->stop();
    mCallbackEnabled.store(false);
    return checkForDisconnectRequest(false);
}

aaudio_result_t AudioStreamTrack::updateStateMachine()
{
    status_t err;
    aaudio_wrapping_frames_t position;
    switch (getState()) {
    // TODO add better state visibility to AudioTrack
    case AAUDIO_STREAM_STATE_STARTING:
        if (mAudioTrack->hasStarted()) {
            setState(AAUDIO_STREAM_STATE_STARTED);
        }
        break;
    case AAUDIO_STREAM_STATE_PAUSING:
        if (mAudioTrack->stopped()) {
            err = mAudioTrack->getPosition(&position);
            if (err != OK) {
                return AAudioConvert_androidToAAudioResult(err);
            } else if (position == mPositionWhenPausing) {
                // Has stream really stopped advancing?
                setState(AAUDIO_STREAM_STATE_PAUSED);
            }
            mPositionWhenPausing = position;
        }
        break;
    case AAUDIO_STREAM_STATE_FLUSHING:
        {
            err = mAudioTrack->getPosition(&position);
            if (err != OK) {
                return AAudioConvert_androidToAAudioResult(err);
            } else if (position == 0) {
                // TODO Advance frames read to match written.
                setState(AAUDIO_STREAM_STATE_FLUSHED);
            }
        }
        break;
    case AAUDIO_STREAM_STATE_STOPPING:
        if (mAudioTrack->stopped()) {
            setState(AAUDIO_STREAM_STATE_STOPPED);
        }
        break;
    default:
        break;
    }
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamTrack::write(const void *buffer,
                                        int32_t numFrames,
                                        int64_t timeoutNanoseconds)
{
    int32_t bytesPerFrame = getBytesPerFrame();
    int32_t numBytes;
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }

    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
        return AAUDIO_ERROR_DISCONNECTED;
    }

    // TODO add timeout to AudioTrack
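    // Until then, any positive timeout is treated as a fully blocking write.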
    bool blocking = timeoutNanoseconds > 0;
    ssize_t bytesWritten = mAudioTrack->write(buffer, numBytes, blocking);
    if (bytesWritten == WOULD_BLOCK) {
        return 0;
    } else if (bytesWritten < 0) {
        ALOGE("invalid write, returned %d", (int)bytesWritten);
        // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioTrack invalidation
        if (bytesWritten == DEAD_OBJECT) {
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesWritten);
    }
    int32_t framesWritten = (int32_t)(bytesWritten / bytesPerFrame);
    incrementFramesWritten(framesWritten);

    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }

    return framesWritten;
}

aaudio_result_t AudioStreamTrack::setBufferSize(int32_t requestedFrames)
{
    // Do not ask for less than one burst.
    if (requestedFrames < getFramesPerBurst()) {
        requestedFrames = getFramesPerBurst();
    }
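    // AudioTrack clamps the request to the track's total capacity and
    // returns the number of frames actually in use, or a negative error.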
    ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
    if (result < 0) {
        return AAudioConvert_androidToAAudioResult(result);
    } else {
        return result;
    }
}

int32_t AudioStreamTrack::getBufferSize() const
{
    return static_cast<int32_t>(mAudioTrack->getBufferSizeInFrames());
}

int32_t AudioStreamTrack::getBufferCapacityFromDevice() const
{
    return static_cast<int32_t>(mAudioTrack->frameCount());
}

int32_t AudioStreamTrack::getXRunCount() const
{
    return static_cast<int32_t>(mAudioTrack->getUnderrunCount());
}

int32_t AudioStreamTrack::getFramesPerBurstFromDevice() const {
    return static_cast<int32_t>(mAudioTrack->getNotificationPeriodInFrames());
}

int64_t AudioStreamTrack::getFramesRead() {
    aaudio_wrapping_frames_t position;
    status_t result;
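    // Only query the playback position while the track is running or paused;
    // otherwise report the last value tracked by the base class.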
    switch (getState()) {
    case AAUDIO_STREAM_STATE_STARTING:
    case AAUDIO_STREAM_STATE_STARTED:
    case AAUDIO_STREAM_STATE_STOPPING:
    case AAUDIO_STREAM_STATE_PAUSING:
    case AAUDIO_STREAM_STATE_PAUSED:
        result = mAudioTrack->getPosition(&position);
        if (result == OK) {
            mFramesRead.update32(position);
        }
        break;
    default:
        break;
    }
    return AudioStreamLegacy::getFramesRead();
}

aaudio_result_t AudioStreamTrack::getTimestamp(clockid_t clockId,
                                               int64_t *framePosition,
                                               int64_t *timeNanoseconds) {
    ExtendedTimestamp extendedTimestamp;
    status_t status = mAudioTrack->getTimestamp(&extendedTimestamp);
    if (status == WOULD_BLOCK) {
        return AAUDIO_ERROR_INVALID_STATE;
    } else if (status != NO_ERROR) {
        return AAudioConvert_androidToAAudioResult(status);
    }
    int64_t position = 0;
    int64_t nanoseconds = 0;
    aaudio_result_t result = getBestTimestamp(clockId, &position,
                                              &nanoseconds, &extendedTimestamp);
    if (result == AAUDIO_OK) {
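        // The device position should lag the application's write position.
        // A timestamp at or beyond the write position is not considered valid yet.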
        if (position < getFramesWritten()) {
            *framePosition = position;
            *timeNanoseconds = nanoseconds;
            return result;
        } else {
            return AAUDIO_ERROR_INVALID_STATE; // TODO review, documented but not consistent
        }
    }
    return result;
}

status_t AudioStreamTrack::doSetVolume() {
    status_t status = NO_INIT;
    if (mAudioTrack.get() != nullptr) {
        float volume = getDuckAndMuteVolume();
        mAudioTrack->setVolume(volume, volume);
        status = NO_ERROR;
    }
    return status;
}

#if AAUDIO_USE_VOLUME_SHAPER

using namespace android::media::VolumeShaper;

binder::Status AudioStreamTrack::applyVolumeShaper(
        const VolumeShaper::Configuration& configuration,
        const VolumeShaper::Operation& operation) {

    sp<VolumeShaper::Configuration> spConfiguration = new VolumeShaper::Configuration(configuration);
    sp<VolumeShaper::Operation> spOperation = new VolumeShaper::Operation(operation);

    if (mAudioTrack.get() != nullptr) {
        ALOGD("applyVolumeShaper() from IPlayer");
        binder::Status status = mAudioTrack->applyVolumeShaper(spConfiguration, spOperation);
        if (status < 0) { // a non-negative value is the volume shaper id.
            ALOGE("applyVolumeShaper() failed with status %d", status);
        }
        return aidl_utils::binderStatusFromStatusT(status);
    } else {
        ALOGD("applyVolumeShaper()"
                      " no AudioTrack for volume control from IPlayer");
        return binder::Status::ok();
    }
}
#endif