1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18
19 #define LOG_TAG "AudioFlinger"
20 //#define LOG_NDEBUG 0
21 #define ATRACE_TAG ATRACE_TAG_AUDIO
22
23 #include "Configuration.h"
24 #include <linux/futex.h>
25 #include <math.h>
26 #include <sys/syscall.h>
27 #include <utils/Log.h>
28 #include <utils/Trace.h>
29
30 #include <private/media/AudioTrackShared.h>
31
32 #include "AudioFlinger.h"
33
34 #include <media/nbaio/Pipe.h>
35 #include <media/nbaio/PipeReader.h>
36 #include <media/AudioValidator.h>
37 #include <media/RecordBufferConverter.h>
38 #include <mediautils/ServiceUtilities.h>
39 #include <audio_utils/minifloat.h>
40
41 // ----------------------------------------------------------------------------
42
43 // Note: the following macro is used for extremely verbose logging messages. In
44 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
45 // 0; but one side effect of this is to turn on all LOGV's as well. Some messages
46 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
47 // turned on. Do not uncomment the #define below unless you really know what you
48 // are doing and want to see all of the extremely verbose messages.
49 //#define VERY_VERY_VERBOSE_LOGGING
50 #ifdef VERY_VERY_VERBOSE_LOGGING
51 #define ALOGVV ALOGV
52 #else
53 #define ALOGVV(a...) do { } while(0)
54 #endif
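// Illustrative usage sketch (the arguments are hypothetical): a call such as
//     ALOGVV("%s(%d): wrote %zu frames", __func__, mId, framesWritten);
// compiles to an empty do/while in the default build, and only becomes a real
// ALOGV when VERY_VERY_VERBOSE_LOGGING is defined above.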
55
56 // TODO: Remove when this is put into AidlConversionUtil.h
57 #define VALUE_OR_RETURN_BINDER_STATUS(x) \
58 ({ \
59 auto _tmp = (x); \
60 if (!_tmp.ok()) return ::android::aidl_utils::binderStatusFromStatusT(_tmp.error()); \
61 std::move(_tmp.value()); \
62 })
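// Illustrative usage sketch (the method name is hypothetical; the pattern mirrors
// the TrackHandle Binder methods further below, e.g. getDualMonoMode): inside a
// method returning binder::Status, the macro unwraps a ConversionResult<T> or
// returns early with the error converted via binderStatusFromStatusT().
//
//     Status SomeHandle::getMode(media::AudioDualMonoMode* out) {
//         audio_dual_mono_mode_t mode = AUDIO_DUAL_MONO_MODE_OFF;
//         *out = VALUE_OR_RETURN_BINDER_STATUS(
//                 legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));
//         return Status::ok();
//     }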
63
64 namespace android {
65
66 using ::android::aidl_utils::binderStatusFromStatusT;
67 using binder::Status;
68 using content::AttributionSourceState;
69 using media::VolumeShaper;
70 // ----------------------------------------------------------------------------
71 // TrackBase
72 // ----------------------------------------------------------------------------
73 #undef LOG_TAG
74 #define LOG_TAG "AF::TrackBase"
75
76 static volatile int32_t nextTrackId = 55;
77
78 // TrackBase constructor must be called with AudioFlinger::mLock held
79 AudioFlinger::ThreadBase::TrackBase::TrackBase(
80 ThreadBase *thread,
81 const sp<Client>& client,
82 const audio_attributes_t& attr,
83 uint32_t sampleRate,
84 audio_format_t format,
85 audio_channel_mask_t channelMask,
86 size_t frameCount,
87 void *buffer,
88 size_t bufferSize,
89 audio_session_t sessionId,
90 pid_t creatorPid,
91 uid_t clientUid,
92 bool isOut,
93 alloc_type alloc,
94 track_type type,
95 audio_port_handle_t portId,
96 std::string metricsId)
97 : RefBase(),
98 mThread(thread),
99 mClient(client),
100 mCblk(NULL),
101 // mBuffer, mBufferSize
102 mState(IDLE),
103 mAttr(attr),
104 mSampleRate(sampleRate),
105 mFormat(format),
106 mChannelMask(channelMask),
107 mChannelCount(isOut ?
108 audio_channel_count_from_out_mask(channelMask) :
109 audio_channel_count_from_in_mask(channelMask)),
110 mFrameSize(audio_has_proportional_frames(format) ?
111 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
112 mFrameCount(frameCount),
113 mSessionId(sessionId),
114 mIsOut(isOut),
115 mId(android_atomic_inc(&nextTrackId)),
116 mTerminated(false),
117 mType(type),
118 mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
119 mPortId(portId),
120 mIsInvalid(false),
121 mTrackMetrics(std::move(metricsId), isOut),
122 mCreatorPid(creatorPid)
123 {
124 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
125 if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
126 ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
127 "%s(%d): uid %d tried to pass itself off as %d",
128 __func__, mId, callingUid, clientUid);
129 clientUid = callingUid;
130 }
131 // clientUid contains the uid of the app that is responsible for this track, so we can blame
132 // battery usage on it.
133 mUid = clientUid;
134
135 // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
136
137 size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
138 // check overflow when computing bufferSize due to multiplication by mFrameSize.
139 if (minBufferSize < frameCount // roundup rounds down for values above UINT_MAX / 2
140 || mFrameSize == 0 // format needs to be correct
141 || minBufferSize > SIZE_MAX / mFrameSize) {
142 android_errorWriteLog(0x534e4554, "34749571");
143 return;
144 }
145 minBufferSize *= mFrameSize;
146
147 if (buffer == nullptr) {
148 bufferSize = minBufferSize; // allocated here.
149 } else if (minBufferSize > bufferSize) {
150 android_errorWriteLog(0x534e4554, "38340117");
151 return;
152 }
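// Worked example for the guards above (illustrative, assuming a 32-bit size_t):
// with mFrameSize == 4 (stereo 16-bit PCM), a frameCount of 0x40000001 would make
// minBufferSize * mFrameSize wrap around; the "minBufferSize > SIZE_MAX / mFrameSize"
// test rejects it before the multiply, and the earlier "minBufferSize < frameCount"
// test catches roundup() wrapping for the streaming (buffer == NULL) case.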
153
154 size_t size = sizeof(audio_track_cblk_t);
155 if (buffer == NULL && alloc == ALLOC_CBLK) {
156 // check overflow when computing allocation size for streaming tracks.
157 if (size > SIZE_MAX - bufferSize) {
158 android_errorWriteLog(0x534e4554, "34749571");
159 return;
160 }
161 size += bufferSize;
162 }
163
164 if (client != 0) {
165 mCblkMemory = client->heap()->allocate(size);
166 if (mCblkMemory == 0 ||
167 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
168 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
169 client->heap()->dump("AudioTrack");
170 mCblkMemory.clear();
171 return;
172 }
173 } else {
174 mCblk = (audio_track_cblk_t *) malloc(size);
175 if (mCblk == NULL) {
176 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
177 return;
178 }
179 }
180
181 // construct the shared structure in-place.
182 if (mCblk != NULL) {
183 new(mCblk) audio_track_cblk_t();
184 switch (alloc) {
185 case ALLOC_READONLY: {
186 const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
187 if (roHeap == 0 ||
188 (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
189 (mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
190 ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
191 __func__, mId, bufferSize);
192 if (roHeap != 0) {
193 roHeap->dump("buffer");
194 }
195 mCblkMemory.clear();
196 mBufferMemory.clear();
197 return;
198 }
199 memset(mBuffer, 0, bufferSize);
200 } break;
201 case ALLOC_PIPE:
202 mBufferMemory = thread->pipeMemory();
203 // mBuffer is the virtual address as seen from current process (mediaserver),
204 // and should normally be coming from mBufferMemory->unsecurePointer().
205 // However in this case the TrackBase does not reference the buffer directly.
206 // It should reference the buffer via the pipe.
207 // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
208 mBuffer = NULL;
209 bufferSize = 0;
210 break;
211 case ALLOC_CBLK:
212 // clear all buffers
213 if (buffer == NULL) {
214 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
215 memset(mBuffer, 0, bufferSize);
216 } else {
217 mBuffer = buffer;
218 #if 0
219 mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic
220 #endif
221 }
222 break;
223 case ALLOC_LOCAL:
224 mBuffer = calloc(1, bufferSize);
225 break;
226 case ALLOC_NONE:
227 mBuffer = buffer;
228 break;
229 default:
230 LOG_ALWAYS_FATAL("%s(%d): invalid allocation type: %d", __func__, mId, (int)alloc);
231 }
232 mBufferSize = bufferSize;
233
234 #ifdef TEE_SINK
235 mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
236 #endif
237 // mState is mirrored for the client to read.
238 mState.setMirror(&mCblk->mState);
239 // ensure our state matches up until we consolidate the enumeration.
240 static_assert(CBLK_STATE_IDLE == IDLE);
241 static_assert(CBLK_STATE_PAUSING == PAUSING);
242 }
243 }
244
245 // TODO b/182392769: use attribution source util
246 static AttributionSourceState audioServerAttributionSource(pid_t pid) {
247 AttributionSourceState attributionSource{};
248 attributionSource.uid = AID_AUDIOSERVER;
249 attributionSource.pid = pid;
250 attributionSource.token = sp<BBinder>::make();
251 return attributionSource;
252 }
253
254 status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
255 {
256 status_t status;
257 if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
258 status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
259 } else {
260 status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
261 }
262 return status;
263 }
264
265 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
266 {
267 // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
268 mServerProxy.clear();
269 releaseCblk();
270 mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
271 if (mClient != 0) {
272 // Client destructor must run with AudioFlinger client mutex locked
273 Mutex::Autolock _l(mClient->audioFlinger()->mClientLock);
274 // If the client's reference count drops to zero, the associated destructor
275 // must run with AudioFlinger lock held. Thus the explicit clear() rather than
276 // relying on the automatic clear() at end of scope.
277 mClient.clear();
278 }
279 // flush the binder command buffer
280 IPCThreadState::self()->flushCommands();
281 }
282
283 // AudioBufferProvider interface
284 // getNextBuffer() = 0;
285 // This implementation of releaseBuffer() is used by Track and RecordTrack
286 void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
287 {
288 #ifdef TEE_SINK
289 mTee.write(buffer->raw, buffer->frameCount);
290 #endif
291
292 ServerProxy::Buffer buf;
293 buf.mFrameCount = buffer->frameCount;
294 buf.mRaw = buffer->raw;
295 buffer->frameCount = 0;
296 buffer->raw = NULL;
297 mServerProxy->releaseBuffer(&buf);
298 }
299
300 status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
301 {
302 mSyncEvents.add(event);
303 return NO_ERROR;
304 }
305
306 AudioFlinger::ThreadBase::PatchTrackBase::PatchTrackBase(sp<ClientProxy> proxy,
307 const ThreadBase& thread,
308 const Timeout& timeout)
309 : mProxy(proxy)
310 {
311 if (timeout) {
312 setPeerTimeout(*timeout);
313 } else {
314 // Double buffer mixer
315 uint64_t mixBufferNs = ((uint64_t)2 * thread.frameCount() * 1000000000) /
316 thread.sampleRate();
317 setPeerTimeout(std::chrono::nanoseconds{mixBufferNs});
318 }
319 }
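// Worked example for the default timeout above (illustrative values): a mixer
// thread with frameCount == 960 at sampleRate == 48000 Hz gives
// mixBufferNs = 2 * 960 * 1e9 / 48000 = 40,000,000 ns, i.e. a 40 ms peer timeout
// covering two mixer buffers.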
320
321 void AudioFlinger::ThreadBase::PatchTrackBase::setPeerTimeout(std::chrono::nanoseconds timeout) {
322 mPeerTimeout.tv_sec = timeout.count() / std::nano::den;
323 mPeerTimeout.tv_nsec = timeout.count() % std::nano::den;
324 }
325
326
327 // ----------------------------------------------------------------------------
328 // Playback
329 // ----------------------------------------------------------------------------
330 #undef LOG_TAG
331 #define LOG_TAG "AF::TrackHandle"
332
333 AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
334 : BnAudioTrack(),
335 mTrack(track)
336 {
337 }
338
339 AudioFlinger::TrackHandle::~TrackHandle() {
340 // just stop the track on deletion; associated resources
341 // will be freed from the main thread once all pending buffers have
342 // been played. If the track is not in the active track list,
343 // we free everything now...
344 mTrack->destroy();
345 }
346
347 Status AudioFlinger::TrackHandle::getCblk(
348 std::optional<media::SharedFileRegion>* _aidl_return) {
349 *_aidl_return = legacy2aidl_NullableIMemory_SharedFileRegion(mTrack->getCblk()).value();
350 return Status::ok();
351 }
352
353 Status AudioFlinger::TrackHandle::start(int32_t* _aidl_return) {
354 *_aidl_return = mTrack->start();
355 return Status::ok();
356 }
357
358 Status AudioFlinger::TrackHandle::stop() {
359 mTrack->stop();
360 return Status::ok();
361 }
362
363 Status AudioFlinger::TrackHandle::flush() {
364 mTrack->flush();
365 return Status::ok();
366 }
367
368 Status AudioFlinger::TrackHandle::pause() {
369 mTrack->pause();
370 return Status::ok();
371 }
372
373 Status AudioFlinger::TrackHandle::attachAuxEffect(int32_t effectId,
374 int32_t* _aidl_return) {
375 *_aidl_return = mTrack->attachAuxEffect(effectId);
376 return Status::ok();
377 }
378
379 Status AudioFlinger::TrackHandle::setParameters(const std::string& keyValuePairs,
380 int32_t* _aidl_return) {
381 *_aidl_return = mTrack->setParameters(String8(keyValuePairs.c_str()));
382 return Status::ok();
383 }
384
385 Status AudioFlinger::TrackHandle::selectPresentation(int32_t presentationId, int32_t programId,
386 int32_t* _aidl_return) {
387 *_aidl_return = mTrack->selectPresentation(presentationId, programId);
388 return Status::ok();
389 }
390
391 Status AudioFlinger::TrackHandle::getTimestamp(media::AudioTimestampInternal* timestamp,
392 int32_t* _aidl_return) {
393 AudioTimestamp legacy;
394 *_aidl_return = mTrack->getTimestamp(legacy);
395 if (*_aidl_return != OK) {
396 return Status::ok();
397 }
398 *timestamp = legacy2aidl_AudioTimestamp_AudioTimestampInternal(legacy).value();
399 return Status::ok();
400 }
401
402 Status AudioFlinger::TrackHandle::signal() {
403 mTrack->signal();
404 return Status::ok();
405 }
406
407 Status AudioFlinger::TrackHandle::applyVolumeShaper(
408 const media::VolumeShaperConfiguration& configuration,
409 const media::VolumeShaperOperation& operation,
410 int32_t* _aidl_return) {
411 sp<VolumeShaper::Configuration> conf = new VolumeShaper::Configuration();
412 *_aidl_return = conf->readFromParcelable(configuration);
413 if (*_aidl_return != OK) {
414 return Status::ok();
415 }
416
417 sp<VolumeShaper::Operation> op = new VolumeShaper::Operation();
418 *_aidl_return = op->readFromParcelable(operation);
419 if (*_aidl_return != OK) {
420 return Status::ok();
421 }
422
423 *_aidl_return = mTrack->applyVolumeShaper(conf, op);
424 return Status::ok();
425 }
426
427 Status AudioFlinger::TrackHandle::getVolumeShaperState(
428 int32_t id,
429 std::optional<media::VolumeShaperState>* _aidl_return) {
430 sp<VolumeShaper::State> legacy = mTrack->getVolumeShaperState(id);
431 if (legacy == nullptr) {
432 _aidl_return->reset();
433 return Status::ok();
434 }
435 media::VolumeShaperState aidl;
436 legacy->writeToParcelable(&aidl);
437 *_aidl_return = aidl;
438 return Status::ok();
439 }
440
441 Status AudioFlinger::TrackHandle::getDualMonoMode(media::AudioDualMonoMode* _aidl_return)
442 {
443 audio_dual_mono_mode_t mode = AUDIO_DUAL_MONO_MODE_OFF;
444 const status_t status = mTrack->getDualMonoMode(&mode)
445 ?: AudioValidator::validateDualMonoMode(mode);
446 if (status == OK) {
447 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
448 legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));
449 }
450 return binderStatusFromStatusT(status);
451 }
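// Note on the "?:" chaining used by the getters/setters here: the GNU binary
// conditional yields its left operand when it is non-zero, so in
//     mTrack->getDualMonoMode(&mode) ?: AudioValidator::validateDualMonoMode(mode)
// the validator only runs when the track call returned OK (0); any error status
// from the track call is propagated unchanged.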
452
453 Status AudioFlinger::TrackHandle::setDualMonoMode(
454 media::AudioDualMonoMode mode)
455 {
456 const auto localMonoMode = VALUE_OR_RETURN_BINDER_STATUS(
457 aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mode));
458 return binderStatusFromStatusT(AudioValidator::validateDualMonoMode(localMonoMode)
459 ?: mTrack->setDualMonoMode(localMonoMode));
460 }
461
462 Status AudioFlinger::TrackHandle::getAudioDescriptionMixLevel(float* _aidl_return)
463 {
464 float leveldB = -std::numeric_limits<float>::infinity();
465 const status_t status = mTrack->getAudioDescriptionMixLevel(&leveldB)
466 ?: AudioValidator::validateAudioDescriptionMixLevel(leveldB);
467 if (status == OK) *_aidl_return = leveldB;
468 return binderStatusFromStatusT(status);
469 }
470
471 Status AudioFlinger::TrackHandle::setAudioDescriptionMixLevel(float leveldB)
472 {
473 return binderStatusFromStatusT(AudioValidator::validateAudioDescriptionMixLevel(leveldB)
474 ?: mTrack->setAudioDescriptionMixLevel(leveldB));
475 }
476
477 Status AudioFlinger::TrackHandle::getPlaybackRateParameters(
478 media::AudioPlaybackRate* _aidl_return)
479 {
480 audio_playback_rate_t localPlaybackRate{};
481 status_t status = mTrack->getPlaybackRateParameters(&localPlaybackRate)
482 ?: AudioValidator::validatePlaybackRate(localPlaybackRate);
483 if (status == NO_ERROR) {
484 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
485 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(localPlaybackRate));
486 }
487 return binderStatusFromStatusT(status);
488 }
489
490 Status AudioFlinger::TrackHandle::setPlaybackRateParameters(
491 const media::AudioPlaybackRate& playbackRate)
492 {
493 const audio_playback_rate_t localPlaybackRate = VALUE_OR_RETURN_BINDER_STATUS(
494 aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(playbackRate));
495 return binderStatusFromStatusT(AudioValidator::validatePlaybackRate(localPlaybackRate)
496 ?: mTrack->setPlaybackRateParameters(localPlaybackRate));
497 }
498
499 // ----------------------------------------------------------------------------
500 // AppOp for audio playback
501 // -------------------------------
502
503 // static
504 sp<AudioFlinger::PlaybackThread::OpPlayAudioMonitor>
505 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::createIfNeeded(
506 const AttributionSourceState& attributionSource, const audio_attributes_t& attr, int id,
507 audio_stream_type_t streamType)
508 {
509 Vector <String16> packages;
510 uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
511 getPackagesForUid(uid, packages);
512 if (isServiceUid(uid)) {
513 if (packages.isEmpty()) {
514 ALOGD("OpPlayAudio: not muting track:%d usage:%d for service UID %d",
515 id,
516 attr.usage,
517 uid);
518 return nullptr;
519 }
520 }
521 // stream type has been filtered by audio policy to indicate whether it can be muted
522 if (streamType == AUDIO_STREAM_ENFORCED_AUDIBLE) {
523 ALOGD("OpPlayAudio: not muting track:%d usage:%d ENFORCED_AUDIBLE", id, attr.usage);
524 return nullptr;
525 }
526 if ((attr.flags & AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY)
527 == AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY) {
528 ALOGD("OpPlayAudio: not muting track:%d flags %#x have FLAG_BYPASS_INTERRUPTION_POLICY",
529 id, attr.flags);
530 return nullptr;
531 }
532
533 AttributionSourceState checkedAttributionSource = AudioFlinger::checkAttributionSourcePackage(
534 attributionSource);
535 return new OpPlayAudioMonitor(checkedAttributionSource, attr.usage, id);
536 }
537
538 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
539 const AttributionSourceState& attributionSource, audio_usage_t usage, int id)
540 : mHasOpPlayAudio(true), mAttributionSource(attributionSource), mUsage((int32_t) usage),
541 mId(id)
542 {
543 }
544
545 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::~OpPlayAudioMonitor()
546 {
547 if (mOpCallback != 0) {
548 mAppOpsManager.stopWatchingMode(mOpCallback);
549 }
550 mOpCallback.clear();
551 }
552
553 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::onFirstRef()
554 {
555 checkPlayAudioForUsage();
556 if (mAttributionSource.packageName.has_value()) {
557 mOpCallback = new PlayAudioOpCallback(this);
558 mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO,
559 VALUE_OR_FATAL(aidl2legacy_string_view_String16(
560 mAttributionSource.packageName.value_or("")))
561 , mOpCallback);
562 }
563 }
564
565 bool AudioFlinger::PlaybackThread::OpPlayAudioMonitor::hasOpPlayAudio() const {
566 return mHasOpPlayAudio.load();
567 }
568
569 // Note this method is never called (and never will be) for audio server / patch record tracks
570 // - not called from the constructor due to the check on UID,
571 // - not called from PlayAudioOpCallback because the callback is not installed in this case
572 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::checkPlayAudioForUsage()
573 {
574 if (!mAttributionSource.packageName.has_value()) {
575 mHasOpPlayAudio.store(false);
576 } else {
577 uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAttributionSource.uid));
578 String16 packageName = VALUE_OR_FATAL(
579 aidl2legacy_string_view_String16(mAttributionSource.packageName.value_or("")));
580 bool hasIt = mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO,
581 mUsage, uid, packageName) == AppOpsManager::MODE_ALLOWED;
582 ALOGD("OpPlayAudio: track:%d usage:%d %smuted", mId, mUsage, hasIt ? "not " : "");
583 mHasOpPlayAudio.store(hasIt);
584 }
585 }
586
587 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::PlayAudioOpCallback::PlayAudioOpCallback(
588 const wp<OpPlayAudioMonitor>& monitor) : mMonitor(monitor)
589 { }
590
591 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::PlayAudioOpCallback::opChanged(int32_t op,
592 const String16& packageName) {
593 // we only have uid, so we need to check all package names anyway
594 UNUSED(packageName);
595 if (op != AppOpsManager::OP_PLAY_AUDIO) {
596 return;
597 }
598 sp<OpPlayAudioMonitor> monitor = mMonitor.promote();
599 if (monitor != NULL) {
600 monitor->checkPlayAudioForUsage();
601 }
602 }
603
604 // static
605 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::getPackagesForUid(
606 uid_t uid, Vector<String16>& packages)
607 {
608 PermissionController permissionController;
609 permissionController.getPackagesForUid(uid, packages);
610 }
611
612 // ----------------------------------------------------------------------------
613 #undef LOG_TAG
614 #define LOG_TAG "AF::Track"
615
616 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
617 AudioFlinger::PlaybackThread::Track::Track(
618 PlaybackThread *thread,
619 const sp<Client>& client,
620 audio_stream_type_t streamType,
621 const audio_attributes_t& attr,
622 uint32_t sampleRate,
623 audio_format_t format,
624 audio_channel_mask_t channelMask,
625 size_t frameCount,
626 void *buffer,
627 size_t bufferSize,
628 const sp<IMemory>& sharedBuffer,
629 audio_session_t sessionId,
630 pid_t creatorPid,
631 const AttributionSourceState& attributionSource,
632 audio_output_flags_t flags,
633 track_type type,
634 audio_port_handle_t portId,
635 size_t frameCountToBeReady,
636 float speed)
637 : TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
638 // TODO: Using unsecurePointer() has some associated security pitfalls
639 // (see declaration for details).
640 // Either document why it is safe in this case or address the
641 // issue (e.g. by copying).
642 (sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
643 (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
644 sessionId, creatorPid,
645 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)), true /*isOut*/,
646 (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
647 type,
648 portId,
649 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(portId)),
650 mFillingUpStatus(FS_INVALID),
651 // mRetryCount initialized later when needed
652 mSharedBuffer(sharedBuffer),
653 mStreamType(streamType),
654 mMainBuffer(thread->sinkBuffer()),
655 mAuxBuffer(NULL),
656 mAuxEffectId(0), mHasVolumeController(false),
657 mFrameMap(16 /* sink-frame-to-track-frame map memory */),
658 mVolumeHandler(new media::VolumeHandler(sampleRate)),
659 mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(attributionSource, attr, id(),
660 streamType)),
661 // mSinkTimestamp
662 mFastIndex(-1),
663 mCachedVolume(1.0),
664 /* The track might not play immediately after becoming active, just as if its volume were 0.
665 * When the track starts playing, its volume will be computed. */
666 mFinalVolume(0.f),
667 mResumeToStopping(false),
668 mFlushHwPending(false),
669 mFlags(flags),
670 mSpeed(speed)
671 {
672 // client == 0 implies sharedBuffer == 0
673 ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
674
675 ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
676 __func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
677
678 if (mCblk == NULL) {
679 return;
680 }
681
682 uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
683 if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
684 ALOGE("%s(%d): no more tracks available", __func__, mId);
685 releaseCblk(); // this makes the track invalid.
686 return;
687 }
688
689 if (sharedBuffer == 0) {
690 mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
691 mFrameSize, !isExternalTrack(), sampleRate);
692 } else {
693 mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
694 mFrameSize, sampleRate);
695 }
696 mServerProxy = mAudioTrackServerProxy;
697 mServerProxy->setStartThresholdInFrames(frameCountToBeReady); // update the Cblk value
698
699 // only allocate a fast track index if we were able to allocate a normal track name
700 if (flags & AUDIO_OUTPUT_FLAG_FAST) {
701 // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
702 // race with setSyncEvent(). However, if we call it, we cannot properly start
703 // static fast tracks (SoundPool) immediately after stopping.
704 //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
705 ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
706 int i = __builtin_ctz(thread->mFastTrackAvailMask);
707 ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);
708 // FIXME This is too eager. We allocate a fast track index before the
709 // fast track becomes active. Since fast tracks are a scarce resource,
710 // this means we are potentially denying other more important fast tracks from
711 // being created. It would be better to allocate the index dynamically.
712 mFastIndex = i;
713 thread->mFastTrackAvailMask &= ~(1 << i);
714 }
715
716 mServerLatencySupported = thread->type() == ThreadBase::MIXER
717 || thread->type() == ThreadBase::DUPLICATING;
718 #ifdef TEE_SINK
719 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
720 + "_" + std::to_string(mId) + "_T");
721 #endif
722
723 if (thread->supportsHapticPlayback()) {
724 // If the track is attached to a haptic playback thread, it may have a
725 // HapticGenerator effect, which generates haptic data, on the track. In that case,
726 // an external vibration is always created for all tracks attached to the haptic playback thread.
727 mAudioVibrationController = new AudioVibrationController(this);
728 std::string packageName = attributionSource.packageName.has_value() ?
729 attributionSource.packageName.value() : "";
730 mExternalVibration = new os::ExternalVibration(
731 mUid, packageName, mAttr, mAudioVibrationController);
732 }
733
734 // Once this item is logged by the server, the client can add properties.
735 const char * const traits = sharedBuffer == 0 ? "" : "static";
736 mTrackMetrics.logConstructor(creatorPid, uid, id(), traits, streamType);
737 }
738
739 AudioFlinger::PlaybackThread::Track::~Track()
740 {
741 ALOGV("%s(%d)", __func__, mId);
742
743 // The destructor would clear mSharedBuffer,
744 // but it will not push the decremented reference count,
745 // leaving the client's IMemory dangling indefinitely.
746 // This prevents that leak.
747 if (mSharedBuffer != 0) {
748 mSharedBuffer.clear();
749 }
750 }
751
752 status_t AudioFlinger::PlaybackThread::Track::initCheck() const
753 {
754 status_t status = TrackBase::initCheck();
755 if (status == NO_ERROR && mCblk == nullptr) {
756 status = NO_MEMORY;
757 }
758 return status;
759 }
760
761 void AudioFlinger::PlaybackThread::Track::destroy()
762 {
763 // NOTE: destroyTrack_l() can remove a strong reference to this Track
764 // by removing it from mTracks vector, so there is a risk that this Tracks's
765 // destructor is called. As the destructor needs to lock mLock,
766 // we must acquire a strong reference on this Track before locking mLock
767 // here so that the destructor is called only when exiting this function.
768 // On the other hand, as long as Track::destroy() is only called by
769 // TrackHandle destructor, the TrackHandle still holds a strong ref on
770 // this Track with its member mTrack.
771 sp<Track> keep(this);
772 { // scope for mLock
773 bool wasActive = false;
774 sp<ThreadBase> thread = mThread.promote();
775 if (thread != 0) {
776 Mutex::Autolock _l(thread->mLock);
777 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
778 wasActive = playbackThread->destroyTrack_l(this);
779 }
780 if (isExternalTrack() && !wasActive) {
781 AudioSystem::releaseOutput(mPortId);
782 }
783 }
784 forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
785 }
786
787 void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
788 {
789 result.appendFormat("Type Id Active Client Session Port Id S Flags "
790 " Format Chn mask SRate "
791 "ST Usg CT "
792 " G db L dB R dB VS dB "
793 " Server FrmCnt FrmRdy F Underruns Flushed"
794 "%s\n",
795 isServerLatencySupported() ? " Latency" : "");
796 }
797
798 void AudioFlinger::PlaybackThread::Track::appendDump(String8& result, bool active)
799 {
800 char trackType;
801 switch (mType) {
802 case TYPE_DEFAULT:
803 case TYPE_OUTPUT:
804 if (isStatic()) {
805 trackType = 'S'; // static
806 } else {
807 trackType = ' '; // normal
808 }
809 break;
810 case TYPE_PATCH:
811 trackType = 'P';
812 break;
813 default:
814 trackType = '?';
815 }
816
817 if (isFastTrack()) {
818 result.appendFormat("F%d %c %6d", mFastIndex, trackType, mId);
819 } else {
820 result.appendFormat(" %c %6d", trackType, mId);
821 }
822
823 char nowInUnderrun;
824 switch (mObservedUnderruns.mBitFields.mMostRecent) {
825 case UNDERRUN_FULL:
826 nowInUnderrun = ' ';
827 break;
828 case UNDERRUN_PARTIAL:
829 nowInUnderrun = '<';
830 break;
831 case UNDERRUN_EMPTY:
832 nowInUnderrun = '*';
833 break;
834 default:
835 nowInUnderrun = '?';
836 break;
837 }
838
839 char fillingStatus;
840 switch (mFillingUpStatus) {
841 case FS_INVALID:
842 fillingStatus = 'I';
843 break;
844 case FS_FILLING:
845 fillingStatus = 'f';
846 break;
847 case FS_FILLED:
848 fillingStatus = 'F';
849 break;
850 case FS_ACTIVE:
851 fillingStatus = 'A';
852 break;
853 default:
854 fillingStatus = '?';
855 break;
856 }
857
858 // clip framesReadySafe to max representation in dump
859 const size_t framesReadySafe =
860 std::min(mAudioTrackServerProxy->framesReadySafe(), (size_t)99999999);
861
862 // obtain volumes
863 const gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
864 const std::pair<float /* volume */, bool /* active */> vsVolume =
865 mVolumeHandler->getLastVolume();
866
867 // Our effective frame count is obtained by ServerProxy::getBufferSizeInFrames()
868 // as it may be reduced by the application.
869 const size_t bufferSizeInFrames = (size_t)mAudioTrackServerProxy->getBufferSizeInFrames();
870 // Check whether the buffer size has been modified by the app.
871 const char modifiedBufferChar = bufferSizeInFrames < mFrameCount
872 ? 'r' /* buffer reduced */: bufferSizeInFrames > mFrameCount
873 ? 'e' /* error */ : ' ' /* identical */;
874
875 result.appendFormat("%7s %6u %7u %7u %2s 0x%03X "
876 "%08X %08X %6u "
877 "%2u %3x %2x "
878 "%5.2g %5.2g %5.2g %5.2g%c "
879 "%08X %6zu%c %6zu %c %9u%c %7u",
880 active ? "yes" : "no",
881 (mClient == 0) ? getpid() : mClient->pid(),
882 mSessionId,
883 mPortId,
884 getTrackStateAsCodedString(),
885 mCblk->mFlags,
886
887 mFormat,
888 mChannelMask,
889 sampleRate(),
890
891 mStreamType,
892 mAttr.usage,
893 mAttr.content_type,
894
895 20.0 * log10(mFinalVolume),
896 20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
897 20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
898 20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
899 vsVolume.second ? 'A' : ' ', // if any VolumeShapers active
900
901 mCblk->mServer,
902 bufferSizeInFrames,
903 modifiedBufferChar,
904 framesReadySafe,
905 fillingStatus,
906 mAudioTrackServerProxy->getUnderrunFrames(),
907 nowInUnderrun,
908 (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000
909 );
910
911 if (isServerLatencySupported()) {
912 double latencyMs;
913 bool fromTrack;
914 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
915 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
916 // or 'k' if estimated from kernel because track frames haven't been presented yet.
917 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
918 } else {
919 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
920 }
921 }
922 result.append("\n");
923 }
924
925 uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
926 return mAudioTrackServerProxy->getSampleRate();
927 }
928
929 // AudioBufferProvider interface
930 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
931 {
932 ServerProxy::Buffer buf;
933 size_t desiredFrames = buffer->frameCount;
934 buf.mFrameCount = desiredFrames;
935 status_t status = mServerProxy->obtainBuffer(&buf);
936 buffer->frameCount = buf.mFrameCount;
937 buffer->raw = buf.mRaw;
938 if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused() && !isOffloaded()) {
939 ALOGV("%s(%d): underrun, framesReady(%zu) < framesDesired(%zd), state: %d",
940 __func__, mId, buf.mFrameCount, desiredFrames, (int)mState);
941 mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
942 } else {
943 mAudioTrackServerProxy->tallyUnderrunFrames(0);
944 }
945 return status;
946 }
947
948 void AudioFlinger::PlaybackThread::Track::releaseBuffer(AudioBufferProvider::Buffer* buffer)
949 {
950 interceptBuffer(*buffer);
951 TrackBase::releaseBuffer(buffer);
952 }
953
954 // TODO: compensate for time shift between HW modules.
955 void AudioFlinger::PlaybackThread::Track::interceptBuffer(
956 const AudioBufferProvider::Buffer& sourceBuffer) {
957 auto start = std::chrono::steady_clock::now();
958 const size_t frameCount = sourceBuffer.frameCount;
959 if (frameCount == 0) {
960 return; // No audio to intercept.
961 // Additionally, PatchProxyBufferProvider::obtainBuffer (called by PatchTrack::getNextBuffer)
962 // does not allow a 0-frame request, contrary to getNextBuffer.
963 }
964 for (auto& teePatch : mTeePatches) {
965 RecordThread::PatchRecord* patchRecord = teePatch.patchRecord.get();
966 const size_t framesWritten = patchRecord->writeFrames(
967 sourceBuffer.i8, frameCount, mFrameSize);
968 const size_t framesLeft = frameCount - framesWritten;
969 ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d can not provide big enough "
970 "buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->mId,
971 framesWritten, frameCount, framesLeft);
972 }
973 auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
974 using namespace std::chrono_literals;
975 // Average is ~20us per track; this should virtually never be logged (logging takes >200us)
976 ALOGD_IF(spent > 500us, "%s: took %lldus to intercept %zu tracks", __func__,
977 spent.count(), mTeePatches.size());
978 }
979
980 // ExtendedAudioBufferProvider interface
981
982 // framesReady() may return an approximation of the number of frames if called
983 // from a different thread than the one calling Proxy->obtainBuffer() and
984 // Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
985 // AudioTrackServerProxy so be especially careful calling with FastTracks.
986 size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
987 if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
988 // Static tracks return zero frames immediately upon stopping (for FastTracks).
989 // The remainder of the buffer is not drained.
990 return 0;
991 }
992 return mAudioTrackServerProxy->framesReady();
993 }
994
995 int64_t AudioFlinger::PlaybackThread::Track::framesReleased() const
996 {
997 return mAudioTrackServerProxy->framesReleased();
998 }
999
1000 void AudioFlinger::PlaybackThread::Track::onTimestamp(const ExtendedTimestamp &timestamp)
1001 {
1002 // This call comes from a FastTrack and should be kept lockless.
1003 // The server side frames are already translated to client frames.
1004 mAudioTrackServerProxy->setTimestamp(timestamp);
1005
1006 // We do not set drained here, as FastTrack timestamp may not go to very last frame.
1007
1008 // Compute latency.
1009 // TODO: Consider whether the server latency may be passed in by FastMixer
1010 // as a constant for all active FastTracks.
1011 const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
1012 mServerLatencyFromTrack.store(true);
1013 mServerLatencyMs.store(latencyMs);
1014 }
1015
1016 // Don't call for fast tracks; the framesReady() could result in priority inversion
1017 bool AudioFlinger::PlaybackThread::Track::isReady() const {
1018 if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
1019 return true;
1020 }
1021
1022 if (isStopping()) {
1023 if (framesReady() > 0) {
1024 mFillingUpStatus = FS_FILLED;
1025 }
1026 return true;
1027 }
1028
1029 size_t bufferSizeInFrames = mServerProxy->getBufferSizeInFrames();
1030 // Note: mServerProxy->getStartThresholdInFrames() is clamped.
1031 const size_t startThresholdInFrames = mServerProxy->getStartThresholdInFrames();
1032 const size_t framesToBeReady = std::clamp( // clamp again to validate client values.
1033 std::min(startThresholdInFrames, bufferSizeInFrames), size_t(1), mFrameCount);
1034
1035 if (framesReady() >= framesToBeReady || (mCblk->mFlags & CBLK_FORCEREADY)) {
1036 ALOGV("%s(%d): consider track ready with %zu/%zu, target was %zu)",
1037 __func__, mId, framesReady(), bufferSizeInFrames, framesToBeReady);
1038 mFillingUpStatus = FS_FILLED;
1039 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1040 return true;
1041 }
1042 return false;
1043 }
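// Worked example for the ready threshold above (illustrative numbers): with
// mFrameCount == 1920, an unmodified buffer size, and a client start threshold of
// 960, framesToBeReady clamps to 960, so the track reports ready once 960 frames
// are queued (or immediately if CBLK_FORCEREADY is set).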
1044
1045 status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
1046 audio_session_t triggerSession __unused)
1047 {
1048 status_t status = NO_ERROR;
1049 ALOGV("%s(%d): calling pid %d session %d",
1050 __func__, mId, IPCThreadState::self()->getCallingPid(), mSessionId);
1051
1052 sp<ThreadBase> thread = mThread.promote();
1053 if (thread != 0) {
1054 if (isOffloaded()) {
1055 Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
1056 Mutex::Autolock _lth(thread->mLock);
1057 sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
1058 if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
1059 (ec != 0 && ec->isNonOffloadableEnabled())) {
1060 invalidate();
1061 return PERMISSION_DENIED;
1062 }
1063 }
1064 Mutex::Autolock _lth(thread->mLock);
1065 track_state state = mState;
1066 // here the track could be either new, or restarted
1067 // in both cases "unstop" the track
1068
1069 // initial state-stopping. next state-pausing.
1070 // What if resume is called ?
1071
1072 if (state == FLUSHED) {
1073 // avoid underrun glitches when starting after flush
1074 reset();
1075 }
1076
1077 // clear mPauseHwPending because of pause (and possibly flush) during underrun.
1078 mPauseHwPending = false;
1079 if (state == PAUSED || state == PAUSING) {
1080 if (mResumeToStopping) {
1081 // a pause happened while stopping: we need to resume to STOPPING_1
1082 mState = TrackBase::STOPPING_1;
1083 ALOGV("%s(%d): PAUSED => STOPPING_1 on thread %d",
1084 __func__, mId, (int)mThreadIoHandle);
1085 } else {
1086 mState = TrackBase::RESUMING;
1087 ALOGV("%s(%d): PAUSED => RESUMING on thread %d",
1088 __func__, mId, (int)mThreadIoHandle);
1089 }
1090 } else {
1091 mState = TrackBase::ACTIVE;
1092 ALOGV("%s(%d): ? => ACTIVE on thread %d",
1093 __func__, mId, (int)mThreadIoHandle);
1094 }
1095
1096 // states to reset position info for non-offloaded/direct tracks
1097 if (!isOffloaded() && !isDirect()
1098 && (state == IDLE || state == STOPPED || state == FLUSHED)) {
1099 mFrameMap.reset();
1100 }
1101 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1102 if (isFastTrack()) {
1103 // refresh fast track underruns on start because that field is never cleared
1104 // by the fast mixer; furthermore, the same track can be recycled, i.e. start
1105 // after stop.
1106 mObservedUnderruns = playbackThread->getFastTrackUnderruns(mFastIndex);
1107 }
1108 status = playbackThread->addTrack_l(this);
1109 if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
1110 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1111 // restore previous state if start was rejected by policy manager
1112 if (status == PERMISSION_DENIED) {
1113 mState = state;
1114 }
1115 }
1116
1117 // Audio timing metrics are computed a few mix cycles after starting.
1118 {
1119 mLogStartCountdown = LOG_START_COUNTDOWN;
1120 mLogStartTimeNs = systemTime();
1121 mLogStartFrames = mAudioTrackServerProxy->getTimestamp()
1122 .mPosition[ExtendedTimestamp::LOCATION_KERNEL];
1123 mLogLatencyMs = 0.;
1124 }
1125
1126 if (status == NO_ERROR || status == ALREADY_EXISTS) {
1127 // for streaming tracks, remove the buffer read stop limit.
1128 mAudioTrackServerProxy->start();
1129 }
1130
1131 // track was already in the active list, not a problem
1132 if (status == ALREADY_EXISTS) {
1133 status = NO_ERROR;
1134 } else {
1135 // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
1136 // It is usually unsafe to access the server proxy from a binder thread.
1137 // But in this case we know the mixer thread (whether normal mixer or fast mixer)
1138 // isn't looking at this track yet: we still hold the normal mixer thread lock,
1139 // and for fast tracks the track is not yet in the fast mixer thread's active set.
1140 // For static tracks, this is used to acknowledge change in position or loop.
1141 ServerProxy::Buffer buffer;
1142 buffer.mFrameCount = 1;
1143 (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
1144 }
1145 } else {
1146 status = BAD_VALUE;
1147 }
1148 if (status == NO_ERROR) {
1149 forEachTeePatchTrack([](auto patchTrack) { patchTrack->start(); });
1150 }
1151 return status;
1152 }
1153
1154 void AudioFlinger::PlaybackThread::Track::stop()
1155 {
1156 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1157 sp<ThreadBase> thread = mThread.promote();
1158 if (thread != 0) {
1159 Mutex::Autolock _l(thread->mLock);
1160 track_state state = mState;
1161 if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
1162 // If the track is not active (PAUSED and buffers full), flush buffers
1163 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1164 if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1165 reset();
1166 mState = STOPPED;
1167 } else if (!isFastTrack() && !isOffloaded() && !isDirect()) {
1168 mState = STOPPED;
1169 } else {
1170 // For fast tracks prepareTracks_l() will set state to STOPPING_2
1171 // presentation is complete
1172 // For an offloaded track this starts a drain and state will
1173 // move to STOPPING_2 when drain completes and then STOPPED
1174 mState = STOPPING_1;
1175 if (isOffloaded()) {
1176 mRetryCount = PlaybackThread::kMaxTrackStopRetriesOffload;
1177 }
1178 }
1179 playbackThread->broadcast_l();
1180 ALOGV("%s(%d): not stopping/stopped => stopping/stopped on thread %d",
1181 __func__, mId, (int)mThreadIoHandle);
1182 }
1183 }
1184 forEachTeePatchTrack([](auto patchTrack) { patchTrack->stop(); });
1185 }
1186
1187 void AudioFlinger::PlaybackThread::Track::pause()
1188 {
1189 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1190 sp<ThreadBase> thread = mThread.promote();
1191 if (thread != 0) {
1192 Mutex::Autolock _l(thread->mLock);
1193 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1194 switch (mState) {
1195 case STOPPING_1:
1196 case STOPPING_2:
1197 if (!isOffloaded()) {
1198 /* nothing to do if track is not offloaded */
1199 break;
1200 }
1201
1202 // Offloaded track was draining, we need to carry on draining when resumed
1203 mResumeToStopping = true;
1204 FALLTHROUGH_INTENDED;
1205 case ACTIVE:
1206 case RESUMING:
1207 mState = PAUSING;
1208 ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
1209 __func__, mId, (int)mThreadIoHandle);
1210 if (isOffloadedOrDirect()) {
1211 mPauseHwPending = true;
1212 }
1213 playbackThread->broadcast_l();
1214 break;
1215
1216 default:
1217 break;
1218 }
1219 }
1220 // Pausing the TeePatch to avoid a glitch on underrun, at the cost of buffered audio loss.
1221 forEachTeePatchTrack([](auto patchTrack) { patchTrack->pause(); });
1222 }
1223
1224 void AudioFlinger::PlaybackThread::Track::flush()
1225 {
1226 ALOGV("%s(%d)", __func__, mId);
1227 sp<ThreadBase> thread = mThread.promote();
1228 if (thread != 0) {
1229 Mutex::Autolock _l(thread->mLock);
1230 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1231
1232 // Flush the ring buffer now if the track is not active in the PlaybackThread.
1233 // Otherwise the flush would not be done until the track is resumed.
1234 // Requires FastTrack removal be BLOCK_UNTIL_ACKED
1235 if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1236 (void)mServerProxy->flushBufferIfNeeded();
1237 }
1238
1239 if (isOffloaded()) {
1240 // If offloaded we allow flush during any state except terminated
1241 // and keep the track active to avoid problems if user is seeking
1242 // rapidly and underlying hardware has a significant delay handling
1243 // a pause
1244 if (isTerminated()) {
1245 return;
1246 }
1247
1248 ALOGV("%s(%d): offload flush", __func__, mId);
1249 reset();
1250
1251 if (mState == STOPPING_1 || mState == STOPPING_2) {
1252 ALOGV("%s(%d): flushed in STOPPING_1 or 2 state, change state to ACTIVE",
1253 __func__, mId);
1254 mState = ACTIVE;
1255 }
1256
1257 mFlushHwPending = true;
1258 mResumeToStopping = false;
1259 } else {
1260 if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
1261 mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
1262 return;
1263 }
1264 // No point remaining in PAUSED state after a flush => go to
1265 // FLUSHED state
1266 mState = FLUSHED;
1267 // do not reset the track if it is still in the process of being stopped or paused.
1268 // this will be done by prepareTracks_l() when the track is stopped.
1269 // prepareTracks_l() will see mState == FLUSHED, then
1270 // remove from active track list, reset(), and trigger presentation complete
1271 if (isDirect()) {
1272 mFlushHwPending = true;
1273 }
1274 if (playbackThread->mActiveTracks.indexOf(this) < 0) {
1275 reset();
1276 }
1277 }
1278 // Prevent flush being lost if the track is flushed and then resumed
1279 // before mixer thread can run. This is important when offloading
1280 // because the hardware buffer could hold a large amount of audio
1281 playbackThread->broadcast_l();
1282 }
1283 // Flush the Tee to avoid on resume playing old data and glitching on the transition to new data
1284 forEachTeePatchTrack([](auto patchTrack) { patchTrack->flush(); });
1285 }
1286
1287 // must be called with thread lock held
1288 void AudioFlinger::PlaybackThread::Track::flushAck()
1289 {
1290 if (!isOffloaded() && !isDirect())
1291 return;
1292
1293 // Clear the client ring buffer so that the app can prime the buffer while paused.
1294 // Otherwise it might not get cleared until playback is resumed and obtainBuffer() is called.
1295 mServerProxy->flushBufferIfNeeded();
1296
1297 mFlushHwPending = false;
1298 }
1299
1300 void AudioFlinger::PlaybackThread::Track::pauseAck()
1301 {
1302 mPauseHwPending = false;
1303 }
1304
1305 void AudioFlinger::PlaybackThread::Track::reset()
1306 {
1307 // Do not reset twice to avoid discarding data written just after a flush and before
1308 // the audioflinger thread detects the track is stopped.
1309 if (!mResetDone) {
1310 // Force underrun condition to avoid false underrun callback until first data is
1311 // written to buffer
1312 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1313 mFillingUpStatus = FS_FILLING;
1314 mResetDone = true;
1315 if (mState == FLUSHED) {
1316 mState = IDLE;
1317 }
1318 }
1319 }
1320
1321 status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
1322 {
1323 sp<ThreadBase> thread = mThread.promote();
1324 if (thread == 0) {
1325 ALOGE("%s(%d): thread is dead", __func__, mId);
1326 return FAILED_TRANSACTION;
1327 } else if ((thread->type() == ThreadBase::DIRECT) ||
1328 (thread->type() == ThreadBase::OFFLOAD)) {
1329 return thread->setParameters(keyValuePairs);
1330 } else {
1331 return PERMISSION_DENIED;
1332 }
1333 }
1334
1335 status_t AudioFlinger::PlaybackThread::Track::selectPresentation(int presentationId,
1336 int programId) {
1337 sp<ThreadBase> thread = mThread.promote();
1338 if (thread == 0) {
1339 ALOGE("thread is dead");
1340 return FAILED_TRANSACTION;
1341 } else if ((thread->type() == ThreadBase::DIRECT) || (thread->type() == ThreadBase::OFFLOAD)) {
1342 DirectOutputThread *directOutputThread = static_cast<DirectOutputThread*>(thread.get());
1343 return directOutputThread->selectPresentation(presentationId, programId);
1344 }
1345 return INVALID_OPERATION;
1346 }
1347
1348 VolumeShaper::Status AudioFlinger::PlaybackThread::Track::applyVolumeShaper(
1349 const sp<VolumeShaper::Configuration>& configuration,
1350 const sp<VolumeShaper::Operation>& operation)
1351 {
1352 sp<VolumeShaper::Configuration> newConfiguration;
1353
1354 if (isOffloadedOrDirect()) {
1355 const VolumeShaper::Configuration::OptionFlag optionFlag
1356 = configuration->getOptionFlags();
1357 if ((optionFlag & VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME) == 0) {
1358 ALOGW("%s(%d): %s tracks do not support frame counted VolumeShaper,"
1359 " using clock time instead",
1360 __func__, mId,
1361 isOffloaded() ? "Offload" : "Direct");
1362 newConfiguration = new VolumeShaper::Configuration(*configuration);
1363 newConfiguration->setOptionFlags(
1364 VolumeShaper::Configuration::OptionFlag(optionFlag
1365 | VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME));
1366 }
1367 }
1368
1369 VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(
1370 (newConfiguration.get() != nullptr ? newConfiguration : configuration), operation);
1371
1372 if (isOffloadedOrDirect()) {
1373 // Signal thread to fetch new volume.
1374 sp<ThreadBase> thread = mThread.promote();
1375 if (thread != 0) {
1376 Mutex::Autolock _l(thread->mLock);
1377 thread->broadcast_l();
1378 }
1379 }
1380 return status;
1381 }
1382
1383 sp<VolumeShaper::State> AudioFlinger::PlaybackThread::Track::getVolumeShaperState(int id)
1384 {
1385 // Note: We don't check if Thread exists.
1386
1387 // mVolumeHandler is thread safe.
1388 return mVolumeHandler->getVolumeShaperState(id);
1389 }
1390
1391 void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volume)
1392 {
1393 if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
1394 mFinalVolume = volume;
1395 setMetadataHasChanged();
1396 mTrackMetrics.logVolume(volume);
1397 }
1398 }
1399
1400 void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
1401 {
1402 playback_track_metadata_v7_t metadata;
1403 metadata.base = {
1404 .usage = mAttr.usage,
1405 .content_type = mAttr.content_type,
1406 .gain = mFinalVolume,
1407 };
1408 metadata.channel_mask = mChannelMask;
1409 strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
1410 *backInserter++ = metadata;
1411 }
1412
1413 void AudioFlinger::PlaybackThread::Track::setTeePatches(TeePatches teePatches) {
1414 forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
1415 mTeePatches = std::move(teePatches);
1416 if (mState == TrackBase::ACTIVE || mState == TrackBase::RESUMING ||
1417 mState == TrackBase::STOPPING_1) {
1418 forEachTeePatchTrack([](auto patchTrack) { patchTrack->start(); });
1419 }
1420 }
1421
1422 status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
1423 {
1424 if (!isOffloaded() && !isDirect()) {
1425 return INVALID_OPERATION; // normal tracks handled through SSQ
1426 }
1427 sp<ThreadBase> thread = mThread.promote();
1428 if (thread == 0) {
1429 return INVALID_OPERATION;
1430 }
1431
1432 Mutex::Autolock _l(thread->mLock);
1433 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1434 return playbackThread->getTimestamp_l(timestamp);
1435 }
1436
1437 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
1438 {
1439 sp<ThreadBase> thread = mThread.promote();
1440 if (thread == nullptr) {
1441 return DEAD_OBJECT;
1442 }
1443
1444 sp<PlaybackThread> dstThread = (PlaybackThread *)thread.get();
1445 sp<PlaybackThread> srcThread; // srcThread is initialized by call to moveAuxEffectToIo()
1446 sp<AudioFlinger> af = mClient->audioFlinger();
1447 status_t status = af->moveAuxEffectToIo(EffectId, dstThread, &srcThread);
1448
1449 if (EffectId != 0 && status == NO_ERROR) {
1450 status = dstThread->attachAuxEffect(this, EffectId);
1451 if (status == NO_ERROR) {
1452 AudioSystem::moveEffectsToIo(std::vector<int>{EffectId}, dstThread->id());
1453 }
1454 }
1455
1456 if (status != NO_ERROR && srcThread != nullptr) {
1457 af->moveAuxEffectToIo(EffectId, srcThread, &dstThread);
1458 }
1459 return status;
1460 }
1461
1462 void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
1463 {
1464 mAuxEffectId = EffectId;
1465 mAuxBuffer = buffer;
1466 }
1467
1468 // presentationComplete verified by frames, used by Mixed tracks.
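// Worked example (illustrative numbers): if presentationComplete() is first called when
// framesWritten == 48000 and the HAL buffer currently holds audioHalFrames == 960 frames,
// mPresentationCompleteFrames becomes 48960; the track is reported as presented once at
// least 48960 frames have been written (and, for non-fast tracks, the proxy reports drained).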
1469 bool AudioFlinger::PlaybackThread::Track::presentationComplete(
1470 int64_t framesWritten, size_t audioHalFrames)
1471 {
1472 // TODO: improve this based on FrameMap if it exists, to ensure full drain.
1473 // This assists in proper timestamp computation as well as wakelock management.
1474
1475 // A track is considered presented when the total number of frames written to the audio HAL
1476 // reaches the number of frames that had been written when presentationComplete() was first
1477 // called (mPresentationCompleteFrames == 0), plus the buffer fill level at that time.
1478 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1479 // to detect when all frames have been played. In this case framesWritten isn't
1480 // useful because it doesn't always reflect whether there is data in the h/w
1481 // buffers, particularly if a track has been paused and resumed during draining
1482 ALOGV("%s(%d): presentationComplete() mPresentationCompleteFrames %lld framesWritten %lld",
1483 __func__, mId,
1484 (long long)mPresentationCompleteFrames, (long long)framesWritten);
1485 if (mPresentationCompleteFrames == 0) {
1486 mPresentationCompleteFrames = framesWritten + audioHalFrames;
1487 ALOGV("%s(%d): set:"
1488 " mPresentationCompleteFrames %lld audioHalFrames %zu",
1489 __func__, mId,
1490 (long long)mPresentationCompleteFrames, audioHalFrames);
1491 }
1492
1493 bool complete;
1494 if (isFastTrack()) { // does not go through linear map
1495 complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
1496 ALOGV("%s(%d): %s framesWritten:%lld mPresentationCompleteFrames:%lld",
1497 __func__, mId, (complete ? "complete" : "waiting"),
1498 (long long) framesWritten, (long long) mPresentationCompleteFrames);
1499 } else { // Normal tracks, OutputTracks, and PatchTracks
1500 complete = framesWritten >= (int64_t) mPresentationCompleteFrames
1501 && mAudioTrackServerProxy->isDrained();
1502 }
1503
1504 if (complete) {
1505 notifyPresentationComplete();
1506 return true;
1507 }
1508 return false;
1509 }
1510
1511 // presentationComplete checked by time, used by DirectTracks.
1512 bool AudioFlinger::PlaybackThread::Track::presentationComplete(uint32_t latencyMs)
1513 {
1514 // For Offloaded or Direct tracks.
1515
1516 // For a direct track, presentation is considered complete once a time-based estimate has elapsed.
1517
1518 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1519 // to detect when all frames have been played. In this case latencyMs isn't
1520 // useful because it doesn't always reflect whether there is data in the h/w
1521 // buffers, particularly if a track has been paused and resumed during draining
1522
1523 constexpr float MIN_SPEED = 0.125f; // min speed scaling allowed for timely response.
1524 if (mPresentationCompleteTimeNs == 0) {
1525 mPresentationCompleteTimeNs = systemTime() + latencyMs * 1e6 / fmax(mSpeed, MIN_SPEED);
1526 ALOGV("%s(%d): set: latencyMs %u mPresentationCompleteTimeNs:%lld",
1527 __func__, mId, latencyMs, (long long) mPresentationCompleteTimeNs);
1528 }
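// Worked example (illustrative numbers): with latencyMs == 40 and mSpeed == 0.25 the wait is
// 40 ms / 0.25 = 160 ms; speeds below MIN_SPEED are clamped so the timeout stays bounded.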
1529
1530 bool complete;
1531 if (isOffloaded()) {
1532 complete = true;
1533 } else { // Direct
1534 complete = systemTime() >= mPresentationCompleteTimeNs;
1535 ALOGV("%s(%d): %s", __func__, mId, (complete ? "complete" : "waiting"));
1536 }
1537 if (complete) {
1538 notifyPresentationComplete();
1539 return true;
1540 }
1541 return false;
1542 }
1543
1544 void AudioFlinger::PlaybackThread::Track::notifyPresentationComplete()
1545 {
1546 // This only triggers once. TODO: should we enforce this?
1547 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1548 mAudioTrackServerProxy->setStreamEndDone();
1549 }
1550
1551 void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
1552 {
1553 for (size_t i = 0; i < mSyncEvents.size();) {
1554 if (mSyncEvents[i]->type() == type) {
1555 mSyncEvents[i]->trigger();
1556 mSyncEvents.removeAt(i);
1557 } else {
1558 ++i;
1559 }
1560 }
1561 }
1562
1563 // implement VolumeBufferProvider interface
1564
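// getVolumeLR() packs both channel gains into a single 32-bit word (one minifloat per channel)
// so the FastMixer can read a consistent left/right pair without taking locks; e.g. unity gain
// on both channels corresponds to GAIN_MINIFLOAT_PACKED_UNITY.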
1565 gain_minifloat_packed_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
1566 {
1567 // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
1568 ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
1569 gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
1570 float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
1571 float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
1572 // track volumes come from shared memory, so can't be trusted and must be clamped
1573 if (vl > GAIN_FLOAT_UNITY) {
1574 vl = GAIN_FLOAT_UNITY;
1575 }
1576 if (vr > GAIN_FLOAT_UNITY) {
1577 vr = GAIN_FLOAT_UNITY;
1578 }
1579 // now apply the cached master volume and stream type volume;
1580 // this is trusted but lacks any synchronization or barrier so may be stale
1581 float v = mCachedVolume;
1582 vl *= v;
1583 vr *= v;
1584 // re-combine into packed minifloat
1585 vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
1586 // FIXME look at mute, pause, and stop flags
1587 return vlr;
1588 }
1589
1590 status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
1591 {
1592 if (isTerminated() || mState == PAUSED ||
1593 ((framesReady() == 0) && ((mSharedBuffer != 0) ||
1594 (mState == STOPPED)))) {
1595 ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
1596 __func__, mId,
1597 (int)mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
1598 event->cancel();
1599 return INVALID_OPERATION;
1600 }
1601 (void) TrackBase::setSyncEvent(event);
1602 return NO_ERROR;
1603 }
1604
1605 void AudioFlinger::PlaybackThread::Track::invalidate()
1606 {
1607 TrackBase::invalidate();
1608 signalClientFlag(CBLK_INVALID);
1609 }
1610
1611 void AudioFlinger::PlaybackThread::Track::disable()
1612 {
1613 // TODO(b/142394888): the filling status should also be reset to filling
1614 signalClientFlag(CBLK_DISABLED);
1615 }
1616
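// signalClientFlag() notifies the client side of a control block state change (e.g. CBLK_INVALID
// or CBLK_DISABLED): it sets the flag in shared memory, then stores to and wakes the control
// block futex so a client blocked in obtainBuffer() re-reads the flags promptly.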
1617 void AudioFlinger::PlaybackThread::Track::signalClientFlag(int32_t flag)
1618 {
1619 // FIXME should use proxy, and needs work
1620 audio_track_cblk_t* cblk = mCblk;
1621 android_atomic_or(flag, &cblk->mFlags);
1622 android_atomic_release_store(0x40000000, &cblk->mFutex);
1623 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
1624 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
1625 }
1626
1627 void AudioFlinger::PlaybackThread::Track::signal()
1628 {
1629 sp<ThreadBase> thread = mThread.promote();
1630 if (thread != 0) {
1631 PlaybackThread *t = (PlaybackThread *)thread.get();
1632 Mutex::Autolock _l(t->mLock);
1633 t->broadcast_l();
1634 }
1635 }
1636
1637 status_t AudioFlinger::PlaybackThread::Track::getDualMonoMode(audio_dual_mono_mode_t* mode)
1638 {
1639 status_t status = INVALID_OPERATION;
1640 if (isOffloadedOrDirect()) {
1641 sp<ThreadBase> thread = mThread.promote();
1642 if (thread != nullptr) {
1643 PlaybackThread *t = (PlaybackThread *)thread.get();
1644 Mutex::Autolock _l(t->mLock);
1645 status = t->mOutput->stream->getDualMonoMode(mode);
1646 ALOGD_IF((status == NO_ERROR) && (mDualMonoMode != *mode),
1647 "%s: mode %d inconsistent", __func__, mDualMonoMode);
1648 }
1649 }
1650 return status;
1651 }
1652
1653 status_t AudioFlinger::PlaybackThread::Track::setDualMonoMode(audio_dual_mono_mode_t mode)
1654 {
1655 status_t status = INVALID_OPERATION;
1656 if (isOffloadedOrDirect()) {
1657 sp<ThreadBase> thread = mThread.promote();
1658 if (thread != nullptr) {
1659 auto t = static_cast<PlaybackThread *>(thread.get());
1660 Mutex::Autolock lock(t->mLock);
1661 status = t->mOutput->stream->setDualMonoMode(mode);
1662 if (status == NO_ERROR) {
1663 mDualMonoMode = mode;
1664 }
1665 }
1666 }
1667 return status;
1668 }
1669
1670 status_t AudioFlinger::PlaybackThread::Track::getAudioDescriptionMixLevel(float* leveldB)
1671 {
1672 status_t status = INVALID_OPERATION;
1673 if (isOffloadedOrDirect()) {
1674 sp<ThreadBase> thread = mThread.promote();
1675 if (thread != nullptr) {
1676 auto t = static_cast<PlaybackThread *>(thread.get());
1677 Mutex::Autolock lock(t->mLock);
1678 status = t->mOutput->stream->getAudioDescriptionMixLevel(leveldB);
1679 ALOGD_IF((status == NO_ERROR) && (mAudioDescriptionMixLevel != *leveldB),
1680 "%s: level %.3f inconsistent", __func__, mAudioDescriptionMixLevel);
1681 }
1682 }
1683 return status;
1684 }
1685
1686 status_t AudioFlinger::PlaybackThread::Track::setAudioDescriptionMixLevel(float leveldB)
1687 {
1688 status_t status = INVALID_OPERATION;
1689 if (isOffloadedOrDirect()) {
1690 sp<ThreadBase> thread = mThread.promote();
1691 if (thread != nullptr) {
1692 auto t = static_cast<PlaybackThread *>(thread.get());
1693 Mutex::Autolock lock(t->mLock);
1694 status = t->mOutput->stream->setAudioDescriptionMixLevel(leveldB);
1695 if (status == NO_ERROR) {
1696 mAudioDescriptionMixLevel = leveldB;
1697 }
1698 }
1699 }
1700 return status;
1701 }
1702
1703 status_t AudioFlinger::PlaybackThread::Track::getPlaybackRateParameters(
1704 audio_playback_rate_t* playbackRate)
1705 {
1706 status_t status = INVALID_OPERATION;
1707 if (isOffloadedOrDirect()) {
1708 sp<ThreadBase> thread = mThread.promote();
1709 if (thread != nullptr) {
1710 auto t = static_cast<PlaybackThread *>(thread.get());
1711 Mutex::Autolock lock(t->mLock);
1712 status = t->mOutput->stream->getPlaybackRateParameters(playbackRate);
1713 ALOGD_IF((status == NO_ERROR) &&
1714 !isAudioPlaybackRateEqual(mPlaybackRateParameters, *playbackRate),
1715 "%s: playbackRate inconsistent", __func__);
1716 }
1717 }
1718 return status;
1719 }
1720
1721 status_t AudioFlinger::PlaybackThread::Track::setPlaybackRateParameters(
1722 const audio_playback_rate_t& playbackRate)
1723 {
1724 status_t status = INVALID_OPERATION;
1725 if (isOffloadedOrDirect()) {
1726 sp<ThreadBase> thread = mThread.promote();
1727 if (thread != nullptr) {
1728 auto t = static_cast<PlaybackThread *>(thread.get());
1729 Mutex::Autolock lock(t->mLock);
1730 status = t->mOutput->stream->setPlaybackRateParameters(playbackRate);
1731 if (status == NO_ERROR) {
1732 mPlaybackRateParameters = playbackRate;
1733 }
1734 }
1735 }
1736 return status;
1737 }
1738
1739 //To be called with thread lock held
1740 bool AudioFlinger::PlaybackThread::Track::isResumePending() {
1741
1742 if (mState == RESUMING)
1743 return true;
1744 /* Resume is pending if track was stopping before pause was called */
1745 if (mState == STOPPING_1 &&
1746 mResumeToStopping)
1747 return true;
1748
1749 return false;
1750 }
1751
1752 //To be called with thread lock held
1753 void AudioFlinger::PlaybackThread::Track::resumeAck() {
1754
1755
1756 if (mState == RESUMING)
1757 mState = ACTIVE;
1758
1759 // Other possibility of pending resume is stopping_1 state
1760 // Do not update the state from stopping as this prevents
1761 // drain being called.
1762 if (mState == STOPPING_1) {
1763 mResumeToStopping = false;
1764 }
1765 }
1766
1767 //To be called with thread lock held
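// updateTrackFrameInfo() translates the sink (mixer output) timestamp into this track's frame
// domain: mFrameMap maps sink frames written back to track frames released, the drained state
// is checked against frames released using the latest pipeline stage that has a valid time,
// and the resulting timestamp and server latency are published for the client. It also logs a
// one-shot latency/startup metric once the reported latency dips, i.e. has stabilized.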
1768 void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
1769 int64_t trackFramesReleased, int64_t sinkFramesWritten,
1770 uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
1771 // Make the kernel frametime available.
1772 const FrameTime ft{
1773 timeStamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
1774 timeStamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
1775 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
1776 mKernelFrameTime.store(ft);
1777 if (!audio_is_linear_pcm(mFormat)) {
1778 return;
1779 }
1780
1781 //update frame map
1782 mFrameMap.push(trackFramesReleased, sinkFramesWritten);
1783
1784 // adjust server times and set drained state.
1785 //
1786 // Our timestamps are only updated when the track is on the Thread active list.
1787 // We need to ensure that tracks are not removed before full drain.
1788 ExtendedTimestamp local = timeStamp;
1789 bool drained = true; // default assume drained, if no server info found
1790 bool checked = false;
1791 for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
1792 i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
1793 // Lookup the track frame corresponding to the sink frame position.
1794 if (local.mTimeNs[i] > 0) {
1795 local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
1796 // check drain state from the latest stage in the pipeline.
1797 if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
1798 drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
1799 checked = true;
1800 }
1801 }
1802 }
1803
1804 mAudioTrackServerProxy->setDrained(drained);
1805 // Set correction for flushed frames that are not accounted for in released.
1806 local.mFlushed = mAudioTrackServerProxy->framesFlushed();
1807 mServerProxy->setTimestamp(local);
1808
1809 // Compute latency info.
1810 const bool useTrackTimestamp = !drained;
1811 const double latencyMs = useTrackTimestamp
1812 ? local.getOutputServerLatencyMs(sampleRate())
1813 : timeStamp.getOutputServerLatencyMs(halSampleRate);
1814
1815 mServerLatencyFromTrack.store(useTrackTimestamp);
1816 mServerLatencyMs.store(latencyMs);
1817
1818 if (mLogStartCountdown > 0
1819 && local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0
1820 && local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] > 0)
1821 {
1822 if (mLogStartCountdown > 1) {
1823 --mLogStartCountdown;
1824 } else if (latencyMs < mLogLatencyMs) { // wait for latency to stabilize (dip)
1825 mLogStartCountdown = 0;
1826 // startup is the difference in times for the current timestamp and our start
1827 double startUpMs =
1828 (local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartTimeNs) * 1e-6;
1829 // adjust for frames played.
1830 startUpMs -= (local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartFrames)
1831 * 1e3 / mSampleRate;
1832 ALOGV("%s: latencyMs:%lf startUpMs:%lf"
1833 " localTime:%lld startTime:%lld"
1834 " localPosition:%lld startPosition:%lld",
1835 __func__, latencyMs, startUpMs,
1836 (long long)local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
1837 (long long)mLogStartTimeNs,
1838 (long long)local.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
1839 (long long)mLogStartFrames);
1840 mTrackMetrics.logLatencyAndStartup(latencyMs, startUpMs);
1841 }
1842 mLogLatencyMs = latencyMs;
1843 }
1844 }
1845
1846 binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::mute(
1847 /*out*/ bool *ret) {
1848 *ret = false;
1849 sp<ThreadBase> thread = mTrack->mThread.promote();
1850 if (thread != 0) {
1851 // Lock for updating mHapticPlaybackEnabled.
1852 Mutex::Autolock _l(thread->mLock);
1853 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1854 if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
1855 && playbackThread->mHapticChannelCount > 0) {
1856 mTrack->setHapticPlaybackEnabled(false);
1857 *ret = true;
1858 }
1859 }
1860 return binder::Status::ok();
1861 }
1862
1863 binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::unmute(
1864 /*out*/ bool *ret) {
1865 *ret = false;
1866 sp<ThreadBase> thread = mTrack->mThread.promote();
1867 if (thread != 0) {
1868 // Lock for updating mHapticPlaybackEnabled.
1869 Mutex::Autolock _l(thread->mLock);
1870 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
1871 if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
1872 && playbackThread->mHapticChannelCount > 0) {
1873 mTrack->setHapticPlaybackEnabled(true);
1874 *ret = true;
1875 }
1876 }
1877 return binder::Status::ok();
1878 }
1879
1880 // ----------------------------------------------------------------------------
1881 #undef LOG_TAG
1882 #define LOG_TAG "AF::OutputTrack"
1883
1884 AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
1885 PlaybackThread *playbackThread,
1886 DuplicatingThread *sourceThread,
1887 uint32_t sampleRate,
1888 audio_format_t format,
1889 audio_channel_mask_t channelMask,
1890 size_t frameCount,
1891 const AttributionSourceState& attributionSource)
1892 : Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
1893 audio_attributes_t{} /* currently unused for output track */,
1894 sampleRate, format, channelMask, frameCount,
1895 nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
1896 AUDIO_SESSION_NONE, getpid(), attributionSource, AUDIO_OUTPUT_FLAG_NONE,
1897 TYPE_OUTPUT),
1898 mActive(false), mSourceThread(sourceThread)
1899 {
1900
1901 if (mCblk != NULL) {
1902 mOutBuffer.frameCount = 0;
1903 playbackThread->mTracks.add(this);
1904 ALOGV("%s(): mCblk %p, mBuffer %p, "
1905 "frameCount %zu, mChannelMask 0x%08x",
1906 __func__, mCblk, mBuffer,
1907 frameCount, mChannelMask);
1908 // since client and server are in the same process,
1909 // the buffer has the same virtual address on both sides
1910 mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
1911 true /*clientInServer*/);
1912 mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
1913 mClientProxy->setSendLevel(0.0);
1914 mClientProxy->setSampleRate(sampleRate);
1915 } else {
1916 ALOGW("%s(%d): Error creating output track on thread %d",
1917 __func__, mId, (int)mThreadIoHandle);
1918 }
1919 }
1920
1921 AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
1922 {
1923 clearBufferQueue();
1924 // superclass destructor will now delete the server proxy and shared memory both refer to
1925 }
1926
1927 status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
1928 audio_session_t triggerSession)
1929 {
1930 status_t status = Track::start(event, triggerSession);
1931 if (status != NO_ERROR) {
1932 return status;
1933 }
1934
1935 mActive = true;
1936 mRetryCount = 127;
1937 return status;
1938 }
1939
1940 void AudioFlinger::PlaybackThread::OutputTrack::stop()
1941 {
1942 Track::stop();
1943 clearBufferQueue();
1944 mOutBuffer.frameCount = 0;
1945 mActive = false;
1946 }
1947
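// OutputTrack::write() first drains any previously queued overflow buffers, then copies the new
// frames into the duplicated output's client buffer. Frames that cannot be written within the
// source thread's wait budget are copied into a heap buffer and queued (up to
// kMaxOverFlowBuffers) for the next call; the return value is the number of input frames
// consumed, which includes frames stashed in the overflow queue.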
1948 ssize_t AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
1949 {
1950 Buffer *pInBuffer;
1951 Buffer inBuffer;
1952 bool outputBufferFull = false;
1953 inBuffer.frameCount = frames;
1954 inBuffer.raw = data;
1955
1956 uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
1957
1958 if (!mActive && frames != 0) {
1959 (void) start();
1960 }
1961
1962 while (waitTimeLeftMs) {
1963 // First write pending buffers, then new data
1964 if (mBufferQueue.size()) {
1965 pInBuffer = mBufferQueue.itemAt(0);
1966 } else {
1967 pInBuffer = &inBuffer;
1968 }
1969
1970 if (pInBuffer->frameCount == 0) {
1971 break;
1972 }
1973
1974 if (mOutBuffer.frameCount == 0) {
1975 mOutBuffer.frameCount = pInBuffer->frameCount;
1976 nsecs_t startTime = systemTime();
1977 status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
1978 if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
1979 ALOGV("%s(%d): thread %d no more output buffers; status %d",
1980 __func__, mId,
1981 (int)mThreadIoHandle, status);
1982 outputBufferFull = true;
1983 break;
1984 }
1985 uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
1986 if (waitTimeLeftMs >= waitTimeMs) {
1987 waitTimeLeftMs -= waitTimeMs;
1988 } else {
1989 waitTimeLeftMs = 0;
1990 }
1991 if (status == NOT_ENOUGH_DATA) {
1992 restartIfDisabled();
1993 continue;
1994 }
1995 }
1996
1997 uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
1998 pInBuffer->frameCount;
1999 memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
2000 Proxy::Buffer buf;
2001 buf.mFrameCount = outFrames;
2002 buf.mRaw = NULL;
2003 mClientProxy->releaseBuffer(&buf);
2004 restartIfDisabled();
2005 pInBuffer->frameCount -= outFrames;
2006 pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
2007 mOutBuffer.frameCount -= outFrames;
2008 mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
2009
2010 if (pInBuffer->frameCount == 0) {
2011 if (mBufferQueue.size()) {
2012 mBufferQueue.removeAt(0);
2013 free(pInBuffer->mBuffer);
2014 if (pInBuffer != &inBuffer) {
2015 delete pInBuffer;
2016 }
2017 ALOGV("%s(%d): thread %d released overflow buffer %zu",
2018 __func__, mId,
2019 (int)mThreadIoHandle, mBufferQueue.size());
2020 } else {
2021 break;
2022 }
2023 }
2024 }
2025
2026 // If we could not write all frames, allocate a buffer and queue it for next time.
2027 if (inBuffer.frameCount) {
2028 sp<ThreadBase> thread = mThread.promote();
2029 if (thread != 0 && !thread->standby()) {
2030 if (mBufferQueue.size() < kMaxOverFlowBuffers) {
2031 pInBuffer = new Buffer;
2032 pInBuffer->mBuffer = malloc(inBuffer.frameCount * mFrameSize);
2033 pInBuffer->frameCount = inBuffer.frameCount;
2034 pInBuffer->raw = pInBuffer->mBuffer;
2035 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
2036 mBufferQueue.add(pInBuffer);
2037 ALOGV("%s(%d): thread %d adding overflow buffer %zu", __func__, mId,
2038 (int)mThreadIoHandle, mBufferQueue.size());
2039 // audio data is consumed (stored locally); set frameCount to 0.
2040 inBuffer.frameCount = 0;
2041 } else {
2042 ALOGW("%s(%d): thread %d no more overflow buffers",
2043 __func__, mId, (int)mThreadIoHandle);
2044 // TODO: return error for this.
2045 }
2046 }
2047 }
2048
2049 // Calling write() with a 0 length buffer means that no more data will be written:
2050 // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
2051 if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
2052 stop();
2053 }
2054
2055 return frames - inBuffer.frameCount; // number of frames consumed.
2056 }
2057
2058 void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
2059 {
2060 std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
2061 backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
2062 }
2063
2064 void AudioFlinger::PlaybackThread::OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
2065 {
2066 std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
2067 mTrackMetadatas = metadatas;
2068 }
2069 // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
2070 setMetadataHasChanged();
2071 }
2072
2073 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
2074 AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
2075 {
2076 ClientProxy::Buffer buf;
2077 buf.mFrameCount = buffer->frameCount;
2078 struct timespec timeout;
2079 timeout.tv_sec = waitTimeMs / 1000;
2080 timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
2081 status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
2082 buffer->frameCount = buf.mFrameCount;
2083 buffer->raw = buf.mRaw;
2084 return status;
2085 }
2086
2087 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
2088 {
2089 size_t size = mBufferQueue.size();
2090
2091 for (size_t i = 0; i < size; i++) {
2092 Buffer *pBuffer = mBufferQueue.itemAt(i);
2093 free(pBuffer->mBuffer);
2094 delete pBuffer;
2095 }
2096 mBufferQueue.clear();
2097 }
2098
2099 void AudioFlinger::PlaybackThread::OutputTrack::restartIfDisabled()
2100 {
2101 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2102 if (mActive && (flags & CBLK_DISABLED)) {
2103 start();
2104 }
2105 }
2106
2107 // ----------------------------------------------------------------------------
2108 #undef LOG_TAG
2109 #define LOG_TAG "AF::PatchTrack"
2110
2111 AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
2112 audio_stream_type_t streamType,
2113 uint32_t sampleRate,
2114 audio_channel_mask_t channelMask,
2115 audio_format_t format,
2116 size_t frameCount,
2117 void *buffer,
2118 size_t bufferSize,
2119 audio_output_flags_t flags,
2120 const Timeout& timeout,
2121 size_t frameCountToBeReady)
2122 : Track(playbackThread, NULL, streamType,
2123 audio_attributes_t{} /* currently unused for patch track */,
2124 sampleRate, format, channelMask, frameCount,
2125 buffer, bufferSize, nullptr /* sharedBuffer */,
2126 AUDIO_SESSION_NONE, getpid(), audioServerAttributionSource(getpid()), flags,
2127 TYPE_PATCH, AUDIO_PORT_HANDLE_NONE, frameCountToBeReady),
2128 PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true),
2129 *playbackThread, timeout)
2130 {
2131 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2132 __func__, mId, sampleRate,
2133 (int)mPeerTimeout.tv_sec,
2134 (int)(mPeerTimeout.tv_nsec / 1000000));
2135 }
2136
2137 AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
2138 {
2139 ALOGV("%s(%d)", __func__, mId);
2140 }
2141
2142 size_t AudioFlinger::PlaybackThread::PatchTrack::framesReady() const
2143 {
2144 if (mPeerProxy && mPeerProxy->producesBufferOnDemand()) {
2145 return std::numeric_limits<size_t>::max();
2146 } else {
2147 return Track::framesReady();
2148 }
2149 }
2150
2151 status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
2152 audio_session_t triggerSession)
2153 {
2154 status_t status = Track::start(event, triggerSession);
2155 if (status != NO_ERROR) {
2156 return status;
2157 }
2158 android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2159 return status;
2160 }
2161
2162 // AudioBufferProvider interface
2163 status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
2164 AudioBufferProvider::Buffer* buffer)
2165 {
2166 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2167 Proxy::Buffer buf;
2168 buf.mFrameCount = buffer->frameCount;
2169 if (ATRACE_ENABLED()) {
2170 std::string traceName("PTnReq");
2171 traceName += std::to_string(id());
2172 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2173 }
2174 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2175 ALOGV_IF(status != NO_ERROR, "%s(%d): getNextBuffer status %d", __func__, mId, status);
2176 buffer->frameCount = buf.mFrameCount;
2177 if (ATRACE_ENABLED()) {
2178 std::string traceName("PTnObt");
2179 traceName += std::to_string(id());
2180 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2181 }
2182 if (buf.mFrameCount == 0) {
2183 return WOULD_BLOCK;
2184 }
2185 status = Track::getNextBuffer(buffer);
2186 return status;
2187 }
2188
2189 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2190 {
2191 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2192 Proxy::Buffer buf;
2193 buf.mFrameCount = buffer->frameCount;
2194 buf.mRaw = buffer->raw;
2195 mPeerProxy->releaseBuffer(&buf);
2196 TrackBase::releaseBuffer(buffer);
2197 }
2198
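// On NOT_ENOUGH_DATA the proxy clears the requested frame count, so each retry below restores it
// and restarts the track if it was disabled by an underrun, for up to kMaxTries attempts.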
2199 status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
2200 const struct timespec *timeOut)
2201 {
2202 status_t status = NO_ERROR;
2203 static const int32_t kMaxTries = 5;
2204 int32_t tryCounter = kMaxTries;
2205 const size_t originalFrameCount = buffer->mFrameCount;
2206 do {
2207 if (status == NOT_ENOUGH_DATA) {
2208 restartIfDisabled();
2209 buffer->mFrameCount = originalFrameCount; // cleared on error, must be restored.
2210 }
2211 status = mProxy->obtainBuffer(buffer, timeOut);
2212 } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
2213 return status;
2214 }
2215
2216 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
2217 {
2218 mProxy->releaseBuffer(buffer);
2219 restartIfDisabled();
2220
2221 // Check if the PatchTrack has enough data to write once in releaseBuffer().
2222 // If not, prevent an underrun from occurring by moving the track into FS_FILLING;
2223 // this logic avoids glitches when suspending A2DP with AudioPlaybackCapture.
2224 // TODO: perhaps underrun avoidance could be a track property checked in isReady() instead.
2225 if (mFillingUpStatus == FS_ACTIVE
2226 && audio_is_linear_pcm(mFormat)
2227 && !isOffloadedOrDirect()) {
2228 if (sp<ThreadBase> thread = mThread.promote();
2229 thread != 0) {
2230 PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
2231 const size_t frameCount = playbackThread->frameCount() * sampleRate()
2232 / playbackThread->sampleRate();
2233 if (framesReady() < frameCount) {
2234 ALOGD("%s(%d) Not enough data, wait for buffer to fill", __func__, mId);
2235 mFillingUpStatus = FS_FILLING;
2236 }
2237 }
2238 }
2239 }
2240
2241 void AudioFlinger::PlaybackThread::PatchTrack::restartIfDisabled()
2242 {
2243 if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
2244 ALOGW("%s(%d): disabled due to previous underrun, restarting", __func__, mId);
2245 start();
2246 }
2247 }
2248
2249 // ----------------------------------------------------------------------------
2250 // Record
2251 // ----------------------------------------------------------------------------
2252
2253
2254 #undef LOG_TAG
2255 #define LOG_TAG "AF::RecordHandle"
2256
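// RecordHandle is the Binder object returned to the client for a record track; it forwards
// IAudioRecord calls to the owning RecordTrack and makes sure the track is stopped and
// destroyed when the handle itself is destroyed.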
2257 AudioFlinger::RecordHandle::RecordHandle(
2258 const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
2259 : BnAudioRecord(),
2260 mRecordTrack(recordTrack)
2261 {
2262 }
2263
2264 AudioFlinger::RecordHandle::~RecordHandle() {
2265 stop_nonvirtual();
2266 mRecordTrack->destroy();
2267 }
2268
2269 binder::Status AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
2270 int /*audio_session_t*/ triggerSession) {
2271 ALOGV("%s()", __func__);
2272 return binderStatusFromStatusT(
2273 mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
2274 }
2275
2276 binder::Status AudioFlinger::RecordHandle::stop() {
2277 stop_nonvirtual();
2278 return binder::Status::ok();
2279 }
2280
2281 void AudioFlinger::RecordHandle::stop_nonvirtual() {
2282 ALOGV("%s()", __func__);
2283 mRecordTrack->stop();
2284 }
2285
2286 binder::Status AudioFlinger::RecordHandle::getActiveMicrophones(
2287 std::vector<media::MicrophoneInfoData>* activeMicrophones) {
2288 ALOGV("%s()", __func__);
2289 std::vector<media::MicrophoneInfo> mics;
2290 status_t status = mRecordTrack->getActiveMicrophones(&mics);
2291 activeMicrophones->resize(mics.size());
2292 for (size_t i = 0; status == OK && i < mics.size(); ++i) {
2293 status = mics[i].writeToParcelable(&activeMicrophones->at(i));
2294 }
2295 return binderStatusFromStatusT(status);
2296 }
2297
2298 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
2299 int /*audio_microphone_direction_t*/ direction) {
2300 ALOGV("%s()", __func__);
2301 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
2302 static_cast<audio_microphone_direction_t>(direction)));
2303 }
2304
2305 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
2306 ALOGV("%s()", __func__);
2307 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
2308 }
2309
2310 binder::Status AudioFlinger::RecordHandle::shareAudioHistory(
2311 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2312 return binderStatusFromStatusT(
2313 mRecordTrack->shareAudioHistory(sharedAudioPackageName, sharedAudioStartMs));
2314 }
2315
2316 // ----------------------------------------------------------------------------
2317 #undef LOG_TAG
2318 #define LOG_TAG "AF::RecordTrack"
2319
2320 // RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
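// Buffer allocation policy (see the TrackBase alloc argument below): a default (client) record
// track uses a Pipe for fast capture or the shared control block otherwise, while other track
// types (e.g. patch records) either wrap the caller-provided buffer or allocate locally.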
2321 AudioFlinger::RecordThread::RecordTrack::RecordTrack(
2322 RecordThread *thread,
2323 const sp<Client>& client,
2324 const audio_attributes_t& attr,
2325 uint32_t sampleRate,
2326 audio_format_t format,
2327 audio_channel_mask_t channelMask,
2328 size_t frameCount,
2329 void *buffer,
2330 size_t bufferSize,
2331 audio_session_t sessionId,
2332 pid_t creatorPid,
2333 const AttributionSourceState& attributionSource,
2334 audio_input_flags_t flags,
2335 track_type type,
2336 audio_port_handle_t portId,
2337 int32_t startFrames)
2338 : TrackBase(thread, client, attr, sampleRate, format,
2339 channelMask, frameCount, buffer, bufferSize, sessionId,
2340 creatorPid,
2341 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
2342 false /*isOut*/,
2343 (type == TYPE_DEFAULT) ?
2344 ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
2345 ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
2346 type, portId,
2347 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(portId)),
2348 mOverflow(false),
2349 mFramesToDrop(0),
2350 mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
2351 mRecordBufferConverter(NULL),
2352 mFlags(flags),
2353 mSilenced(false),
2354 mStartFrames(startFrames)
2355 {
2356 if (mCblk == NULL) {
2357 return;
2358 }
2359
2360 if (!isDirect()) {
2361 mRecordBufferConverter = new RecordBufferConverter(
2362 thread->mChannelMask, thread->mFormat, thread->mSampleRate,
2363 channelMask, format, sampleRate);
2364 // Check if the RecordBufferConverter construction was successful.
2365 // If not, don't continue with construction.
2366 //
2367 // NOTE: It would be extremely rare that the record track cannot be created
2368 // for the current device, but a pending or future device change would make
2369 // the record track configuration valid.
2370 if (mRecordBufferConverter->initCheck() != NO_ERROR) {
2371 ALOGE("%s(%d): RecordTrack unable to create record buffer converter", __func__, mId);
2372 return;
2373 }
2374 }
2375
2376 mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
2377 mFrameSize, !isExternalTrack());
2378
2379 mResamplerBufferProvider = new ResamplerBufferProvider(this);
2380
2381 if (flags & AUDIO_INPUT_FLAG_FAST) {
2382 ALOG_ASSERT(thread->mFastTrackAvail);
2383 thread->mFastTrackAvail = false;
2384 } else {
2385 // TODO: only Normal Record has timestamps (Fast Record does not).
2386 mServerLatencySupported = checkServerLatencySupported(mFormat, flags);
2387 }
2388 #ifdef TEE_SINK
2389 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
2390 + "_" + std::to_string(mId)
2391 + "_R");
2392 #endif
2393
2394 // Once this item is logged by the server, the client can add properties.
2395 mTrackMetrics.logConstructor(creatorPid, uid(), id());
2396 }
2397
2398 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
2399 {
2400 ALOGV("%s()", __func__);
2401 delete mRecordBufferConverter;
2402 delete mResamplerBufferProvider;
2403 }
2404
2405 status_t AudioFlinger::RecordThread::RecordTrack::initCheck() const
2406 {
2407 status_t status = TrackBase::initCheck();
2408 if (status == NO_ERROR && mServerProxy == 0) {
2409 status = BAD_VALUE;
2410 }
2411 return status;
2412 }
2413
2414 // AudioBufferProvider interface
2415 status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
2416 {
2417 ServerProxy::Buffer buf;
2418 buf.mFrameCount = buffer->frameCount;
2419 status_t status = mServerProxy->obtainBuffer(&buf);
2420 buffer->frameCount = buf.mFrameCount;
2421 buffer->raw = buf.mRaw;
2422 if (buf.mFrameCount == 0) {
2423 // FIXME also wake futex so that overrun is noticed more quickly
2424 (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
2425 }
2426 return status;
2427 }
2428
2429 status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
2430 audio_session_t triggerSession)
2431 {
2432 sp<ThreadBase> thread = mThread.promote();
2433 if (thread != 0) {
2434 RecordThread *recordThread = (RecordThread *)thread.get();
2435 return recordThread->start(this, event, triggerSession);
2436 } else {
2437 ALOGW("%s track %d: thread was destroyed", __func__, portId());
2438 return DEAD_OBJECT;
2439 }
2440 }
2441
2442 void AudioFlinger::RecordThread::RecordTrack::stop()
2443 {
2444 sp<ThreadBase> thread = mThread.promote();
2445 if (thread != 0) {
2446 RecordThread *recordThread = (RecordThread *)thread.get();
2447 if (recordThread->stop(this) && isExternalTrack()) {
2448 AudioSystem::stopInput(mPortId);
2449 }
2450 }
2451 }
2452
2453 void AudioFlinger::RecordThread::RecordTrack::destroy()
2454 {
2455 // see comments at AudioFlinger::PlaybackThread::Track::destroy()
2456 sp<RecordTrack> keep(this);
2457 {
2458 track_state priorState = mState;
2459 sp<ThreadBase> thread = mThread.promote();
2460 if (thread != 0) {
2461 Mutex::Autolock _l(thread->mLock);
2462 RecordThread *recordThread = (RecordThread *) thread.get();
2463 priorState = mState;
2464 if (!mSharedAudioPackageName.empty()) {
2465 recordThread->resetAudioHistory_l();
2466 }
2467 recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
2468 }
2469 // APM portid/client management done outside of lock.
2470 // NOTE: if thread doesn't exist, the input descriptor probably doesn't either.
2471 if (isExternalTrack()) {
2472 switch (priorState) {
2473 case ACTIVE: // invalidated while still active
2474 case STARTING_2: // invalidated/start-aborted after startInput successfully called
2475 case PAUSING: // invalidated while in the middle of stop() pausing (still active)
2476 AudioSystem::stopInput(mPortId);
2477 break;
2478
2479 case STARTING_1: // invalidated/start-aborted and startInput not successful
2480 case PAUSED: // OK, not active
2481 case IDLE: // OK, not active
2482 break;
2483
2484 case STOPPED: // unexpected (destroyed)
2485 default:
2486 LOG_ALWAYS_FATAL("%s(%d): invalid prior state: %d", __func__, mId, priorState);
2487 }
2488 AudioSystem::releaseInput(mPortId);
2489 }
2490 }
2491 }
2492
2493 void AudioFlinger::RecordThread::RecordTrack::invalidate()
2494 {
2495 TrackBase::invalidate();
2496 // FIXME should use proxy, and needs work
2497 audio_track_cblk_t* cblk = mCblk;
2498 android_atomic_or(CBLK_INVALID, &cblk->mFlags);
2499 android_atomic_release_store(0x40000000, &cblk->mFutex);
2500 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
2501 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
2502 }
2503
2504
2505 void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
2506 {
2507 result.appendFormat("Active Id Client Session Port Id S Flags "
2508 " Format Chn mask SRate Source "
2509 " Server FrmCnt FrmRdy Sil%s\n",
2510 isServerLatencySupported() ? " Latency" : "");
2511 }
2512
2513 void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
2514 {
2515 result.appendFormat("%c%5s %6d %6u %7u %7u %2s 0x%03X "
2516 "%08X %08X %6u %6X "
2517 "%08X %6zu %6zu %3c",
2518 isFastTrack() ? 'F' : ' ',
2519 active ? "yes" : "no",
2520 mId,
2521 (mClient == 0) ? getpid() : mClient->pid(),
2522 mSessionId,
2523 mPortId,
2524 getTrackStateAsCodedString(),
2525 mCblk->mFlags,
2526
2527 mFormat,
2528 mChannelMask,
2529 mSampleRate,
2530 mAttr.source,
2531
2532 mCblk->mServer,
2533 mFrameCount,
2534 mServerProxy->framesReadySafe(),
2535 isSilenced() ? 's' : 'n'
2536 );
2537 if (isServerLatencySupported()) {
2538 double latencyMs;
2539 bool fromTrack;
2540 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
2541 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
2542 // or 'k' if estimated from kernel (usually for debugging).
2543 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
2544 } else {
2545 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
2546 }
2547 }
2548 result.append("\n");
2549 }
2550
2551 void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
2552 {
2553 if (event == mSyncStartEvent) {
2554 ssize_t framesToDrop = 0;
2555 sp<ThreadBase> threadBase = mThread.promote();
2556 if (threadBase != 0) {
2557 // TODO: use actual buffer filling status instead of 2 buffers when info is available
2558 // from audio HAL
2559 framesToDrop = threadBase->mFrameCount * 2;
2560 }
2561 mFramesToDrop = framesToDrop;
2562 }
2563 }
2564
2565 void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent()
2566 {
2567 if (mSyncStartEvent != 0) {
2568 mSyncStartEvent->cancel();
2569 mSyncStartEvent.clear();
2570 }
2571 mFramesToDrop = 0;
2572 }
2573
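// Converts HAL (input) frame positions into this track's frame domain using
// (sourceFramesRead, trackFramesReleased) as the anchor and scaling by the sample-rate ratio.
// Worked example (illustrative numbers): with halSampleRate == 48000 and mSampleRate == 16000,
// a HAL position 4800 frames past the anchor maps to 1600 track frames past trackFramesReleased.
// The computed latency is negated because this is the capture path.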
2574 void AudioFlinger::RecordThread::RecordTrack::updateTrackFrameInfo(
2575 int64_t trackFramesReleased, int64_t sourceFramesRead,
2576 uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
2577 {
2578 // Make the kernel frametime available.
2579 const FrameTime ft{
2580 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
2581 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
2582 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
2583 mKernelFrameTime.store(ft);
2584 if (!audio_is_linear_pcm(mFormat)) {
2585 return;
2586 }
2587
2588 ExtendedTimestamp local = timestamp;
2589
2590 // Convert HAL frames to server-side track frames at track sample rate.
2591 // We use trackFramesReleased and sourceFramesRead as an anchor point.
2592 for (int i = ExtendedTimestamp::LOCATION_SERVER; i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2593 if (local.mTimeNs[i] != 0) {
2594 const int64_t relativeServerFrames = local.mPosition[i] - sourceFramesRead;
2595 const int64_t relativeTrackFrames = relativeServerFrames
2596 * mSampleRate / halSampleRate; // TODO: potential computation overflow
2597 local.mPosition[i] = relativeTrackFrames + trackFramesReleased;
2598 }
2599 }
2600 mServerProxy->setTimestamp(local);
2601
2602 // Compute latency info.
2603 const bool useTrackTimestamp = true; // use track unless debugging.
2604 const double latencyMs = - (useTrackTimestamp
2605 ? local.getOutputServerLatencyMs(sampleRate())
2606 : timestamp.getOutputServerLatencyMs(halSampleRate));
2607
2608 mServerLatencyFromTrack.store(useTrackTimestamp);
2609 mServerLatencyMs.store(latencyMs);
2610 }
2611
2612 status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
2613 std::vector<media::MicrophoneInfo>* activeMicrophones)
2614 {
2615 sp<ThreadBase> thread = mThread.promote();
2616 if (thread != 0) {
2617 RecordThread *recordThread = (RecordThread *)thread.get();
2618 return recordThread->getActiveMicrophones(activeMicrophones);
2619 } else {
2620 return BAD_VALUE;
2621 }
2622 }
2623
2624 status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneDirection(
2625 audio_microphone_direction_t direction) {
2626 sp<ThreadBase> thread = mThread.promote();
2627 if (thread != 0) {
2628 RecordThread *recordThread = (RecordThread *)thread.get();
2629 return recordThread->setPreferredMicrophoneDirection(direction);
2630 } else {
2631 return BAD_VALUE;
2632 }
2633 }
2634
2635 status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
2636 sp<ThreadBase> thread = mThread.promote();
2637 if (thread != 0) {
2638 RecordThread *recordThread = (RecordThread *)thread.get();
2639 return recordThread->setPreferredMicrophoneFieldDimension(zoom);
2640 } else {
2641 return BAD_VALUE;
2642 }
2643 }
2644
2645 status_t AudioFlinger::RecordThread::RecordTrack::shareAudioHistory(
2646 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2647
2648 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
2649 const pid_t callingPid = IPCThreadState::self()->getCallingPid();
2650 if (callingUid != mUid || callingPid != mCreatorPid) {
2651 return PERMISSION_DENIED;
2652 }
2653
2654 AttributionSourceState attributionSource{};
2655 attributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
2656 attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
2657 attributionSource.token = sp<BBinder>::make();
2658 if (!captureHotwordAllowed(attributionSource)) {
2659 return PERMISSION_DENIED;
2660 }
2661
2662 sp<ThreadBase> thread = mThread.promote();
2663 if (thread != 0) {
2664 RecordThread *recordThread = (RecordThread *)thread.get();
2665 status_t status = recordThread->shareAudioHistory(
2666 sharedAudioPackageName, mSessionId, sharedAudioStartMs);
2667 if (status == NO_ERROR) {
2668 mSharedAudioPackageName = sharedAudioPackageName;
2669 }
2670 return status;
2671 } else {
2672 return BAD_VALUE;
2673 }
2674 }
2675
2676
2677 // ----------------------------------------------------------------------------
2678 #undef LOG_TAG
2679 #define LOG_TAG "AF::PatchRecord"
2680
2681 AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
2682 uint32_t sampleRate,
2683 audio_channel_mask_t channelMask,
2684 audio_format_t format,
2685 size_t frameCount,
2686 void *buffer,
2687 size_t bufferSize,
2688 audio_input_flags_t flags,
2689 const Timeout& timeout)
2690 : RecordTrack(recordThread, NULL,
2691 audio_attributes_t{} /* currently unused for patch track */,
2692 sampleRate, format, channelMask, frameCount,
2693 buffer, bufferSize, AUDIO_SESSION_NONE, getpid(),
2694 audioServerAttributionSource(getpid()), flags, TYPE_PATCH),
2695 PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true),
2696 *recordThread, timeout)
2697 {
2698 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2699 __func__, mId, sampleRate,
2700 (int)mPeerTimeout.tv_sec,
2701 (int)(mPeerTimeout.tv_nsec / 1000000));
2702 }
2703
2704 AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
2705 {
2706 ALOGV("%s(%d)", __func__, mId);
2707 }
2708
2709 static size_t writeFramesHelper(
2710 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
2711 {
2712 AudioBufferProvider::Buffer patchBuffer;
2713 patchBuffer.frameCount = frameCount;
2714 auto status = dest->getNextBuffer(&patchBuffer);
2715 if (status != NO_ERROR) {
2716 ALOGW("%s PatchRecord getNextBuffer failed with error %d: %s",
2717 __func__, status, strerror(-status));
2718 return 0;
2719 }
2720 ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
2721 memcpy(patchBuffer.raw, src, patchBuffer.frameCount * frameSize);
2722 size_t framesWritten = patchBuffer.frameCount;
2723 dest->releaseBuffer(&patchBuffer);
2724 return framesWritten;
2725 }
2726
2727 // static
2728 size_t AudioFlinger::RecordThread::PatchRecord::writeFrames(
2729 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
2730 {
2731 size_t framesWritten = writeFramesHelper(dest, src, frameCount, frameSize);
2732 // On buffer wrap, the buffer frame count will be less than requested,
2733 // when this happens a second buffer needs to be used to write the leftover audio
2734 const size_t framesLeft = frameCount - framesWritten;
2735 if (framesWritten != 0 && framesLeft != 0) {
2736 framesWritten += writeFramesHelper(dest, (const char*)src + framesWritten * frameSize,
2737 framesLeft, frameSize);
2738 }
2739 return framesWritten;
2740 }
2741
2742 // AudioBufferProvider interface
2743 status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
2744 AudioBufferProvider::Buffer* buffer)
2745 {
2746 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2747 Proxy::Buffer buf;
2748 buf.mFrameCount = buffer->frameCount;
2749 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2750 ALOGV_IF(status != NO_ERROR,
2751 "%s(%d): mPeerProxy->obtainBuffer status %d", __func__, mId, status);
2752 buffer->frameCount = buf.mFrameCount;
2753 if (ATRACE_ENABLED()) {
2754 std::string traceName("PRnObt");
2755 traceName += std::to_string(id());
2756 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2757 }
2758 if (buf.mFrameCount == 0) {
2759 return WOULD_BLOCK;
2760 }
2761 status = RecordTrack::getNextBuffer(buffer);
2762 return status;
2763 }
2764
2765 void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2766 {
2767 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2768 Proxy::Buffer buf;
2769 buf.mFrameCount = buffer->frameCount;
2770 buf.mRaw = buffer->raw;
2771 mPeerProxy->releaseBuffer(&buf);
2772 TrackBase::releaseBuffer(buffer);
2773 }
2774
2775 status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
2776 const struct timespec *timeOut)
2777 {
2778 return mProxy->obtainBuffer(buffer, timeOut);
2779 }
2780
2781 void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
2782 {
2783 mProxy->releaseBuffer(buffer);
2784 }
2785
2786 #undef LOG_TAG
2787 #define LOG_TAG "AF::PthrPatchRecord"
2788
2789 static std::unique_ptr<void, decltype(free)*> allocAligned(size_t alignment, size_t size)
2790 {
2791 void *ptr = nullptr;
2792 (void)posix_memalign(&ptr, alignment, size);
2793 return std::unique_ptr<void, decltype(free)*>(ptr, free);
2794 }
2795
2796 AudioFlinger::RecordThread::PassthruPatchRecord::PassthruPatchRecord(
2797 RecordThread *recordThread,
2798 uint32_t sampleRate,
2799 audio_channel_mask_t channelMask,
2800 audio_format_t format,
2801 size_t frameCount,
2802 audio_input_flags_t flags)
2803 : PatchRecord(recordThread, sampleRate, channelMask, format, frameCount,
2804 nullptr /*buffer*/, 0 /*bufferSize*/, flags),
2805 mPatchRecordAudioBufferProvider(*this),
2806 mSinkBuffer(allocAligned(32, mFrameCount * mFrameSize)),
2807 mStubBuffer(allocAligned(32, mFrameCount * mFrameSize))
2808 {
2809 memset(mStubBuffer.get(), 0, mFrameCount * mFrameSize);
2810 }
2811
2812 sp<StreamInHalInterface> AudioFlinger::RecordThread::PassthruPatchRecord::obtainStream(
2813 sp<ThreadBase>* thread)
2814 {
2815 *thread = mThread.promote();
2816 if (!*thread) return nullptr;
2817 RecordThread *recordThread = static_cast<RecordThread*>((*thread).get());
2818 Mutex::Autolock _l(recordThread->mLock);
2819 return recordThread->mInput ? recordThread->mInput->stream : nullptr;
2820 }
2821
2822 // PatchProxyBufferProvider methods are called on DirectOutputThread
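// In the passthru patch the playback side drives the capture: obtainBuffer() reads directly
// from the input HAL stream into mSinkBuffer, hands the data to the patch record's buffer via
// writeFrames(), and signals read() on the RecordThread through mReadCV so that thread keeps
// consuming, even though read() only ever produces zero-filled stub data.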
2823 status_t AudioFlinger::RecordThread::PassthruPatchRecord::obtainBuffer(
2824 Proxy::Buffer* buffer, const struct timespec* timeOut)
2825 {
2826 if (mUnconsumedFrames) {
2827 buffer->mFrameCount = std::min(buffer->mFrameCount, mUnconsumedFrames);
2828 // mUnconsumedFrames is decreased in releaseBuffer to use actual frame consumption figure.
2829 return PatchRecord::obtainBuffer(buffer, timeOut);
2830 }
2831
2832 // Otherwise, execute a read from HAL and write into the buffer.
2833 nsecs_t startTimeNs = 0;
2834 if (timeOut && (timeOut->tv_sec != 0 || timeOut->tv_nsec != 0) && timeOut->tv_sec != INT_MAX) {
2835 // Will need to correct timeOut by elapsed time.
2836 startTimeNs = systemTime();
2837 }
2838 const size_t framesToRead = std::min(buffer->mFrameCount, mFrameCount);
2839 buffer->mFrameCount = 0;
2840 buffer->mRaw = nullptr;
2841 sp<ThreadBase> thread;
2842 sp<StreamInHalInterface> stream = obtainStream(&thread);
2843 if (!stream) return NO_INIT; // If there is no stream, RecordThread is not reading.
2844
2845 status_t result = NO_ERROR;
2846 size_t bytesRead = 0;
2847 {
2848 ATRACE_NAME("read");
2849 result = stream->read(mSinkBuffer.get(), framesToRead * mFrameSize, &bytesRead);
2850 if (result != NO_ERROR) goto stream_error;
2851 if (bytesRead == 0) return NO_ERROR;
2852 }
2853
2854 {
2855 std::lock_guard<std::mutex> lock(mReadLock);
2856 mReadBytes += bytesRead;
2857 mReadError = NO_ERROR;
2858 }
2859 mReadCV.notify_one();
2860 // writeFrames handles wraparound and should write all the provided frames.
2861 // If it couldn't, there is something wrong with the client/server buffer of the software patch.
2862 buffer->mFrameCount = writeFrames(
2863 &mPatchRecordAudioBufferProvider,
2864 mSinkBuffer.get(), bytesRead / mFrameSize, mFrameSize);
2865 ALOGW_IF(buffer->mFrameCount < bytesRead / mFrameSize,
2866 "Lost %zu frames obtained from HAL", bytesRead / mFrameSize - buffer->mFrameCount);
2867 mUnconsumedFrames = buffer->mFrameCount;
2868 struct timespec newTimeOut;
2869 if (startTimeNs) {
2870 // Correct the timeout by elapsed time.
2871 nsecs_t newTimeOutNs = audio_utils_ns_from_timespec(timeOut) - (systemTime() - startTimeNs);
2872 if (newTimeOutNs < 0) newTimeOutNs = 0;
2873 newTimeOut.tv_sec = newTimeOutNs / NANOS_PER_SECOND;
2874 newTimeOut.tv_nsec = newTimeOutNs - newTimeOut.tv_sec * NANOS_PER_SECOND;
2875 timeOut = &newTimeOut;
2876 }
    return PatchRecord::obtainBuffer(buffer, timeOut);

stream_error:
    stream->standby();
    {
        std::lock_guard<std::mutex> lock(mReadLock);
        mReadError = result;
    }
    mReadCV.notify_one();
    return result;
}

void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(Proxy::Buffer* buffer)
{
    if (buffer->mFrameCount <= mUnconsumedFrames) {
        mUnconsumedFrames -= buffer->mFrameCount;
    } else {
        ALOGW("Write side has consumed more frames than we had: %zu > %zu",
                buffer->mFrameCount, mUnconsumedFrames);
        mUnconsumedFrames = 0;
    }
    PatchRecord::releaseBuffer(buffer);
}
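
// Example of the bookkeeping above (illustrative numbers): if obtainBuffer wrote 480
// frames from the HAL into the patch buffer but the write side only consumed 200
// before releasing, mUnconsumedFrames drops to 280, and the next obtainBuffer call
// serves those remaining frames without touching the HAL again.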

// AudioBufferProvider and Source methods are called on RecordThread
// 'read' emulates actual audio data with zeros. This is OK because 'getNextBuffer'
// and 'releaseBuffer' are stubbed out and ignore their input.
// It's not possible to retrieve actual data here without blocking 'obtainBuffer'
// until we copy it.
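//
// The coordination between obtainBuffer() (producer) and read() (consumer) is a plain
// condition-variable handshake, roughly (illustrative excerpt of the surrounding code):
//
//     // producer, on the patch thread:
//     { std::lock_guard<std::mutex> lock(mReadLock); mReadBytes += bytesRead; }
//     mReadCV.notify_one();
//
//     // consumer, on RecordThread:
//     std::unique_lock<std::mutex> lock(mReadLock);
//     mReadCV.wait(lock, [&]{ return mReadError != NO_ERROR || mReadBytes != 0; });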
status_t AudioFlinger::RecordThread::PassthruPatchRecord::read(
        void* buffer, size_t bytes, size_t* read)
{
    bytes = std::min(bytes, mFrameCount * mFrameSize);
    {
        std::unique_lock<std::mutex> lock(mReadLock);
        mReadCV.wait(lock, [&]{ return mReadError != NO_ERROR || mReadBytes != 0; });
        if (mReadError != NO_ERROR) {
            mLastReadFrames = 0;
            return mReadError;
        }
        *read = std::min(bytes, mReadBytes);
        mReadBytes -= *read;
    }
    mLastReadFrames = *read / mFrameSize;
    memset(buffer, 0, *read);
    return 0;
}

status_t AudioFlinger::RecordThread::PassthruPatchRecord::getCapturePosition(
        int64_t* frames, int64_t* time)
{
    sp<ThreadBase> thread;
    sp<StreamInHalInterface> stream = obtainStream(&thread);
    return stream ? stream->getCapturePosition(frames, time) : NO_INIT;
}

status_t AudioFlinger::RecordThread::PassthruPatchRecord::standby()
{
    // RecordThread issues the 'standby' command in two major cases:
    // 1. Error on read--this case is handled in 'obtainBuffer'.
    // 2. Track is stopping--as PassthruPatchRecord assumes continuous
    //    output, this can only happen when the software patch
    //    is being torn down. In this case, the RecordThread
    //    will terminate and close the HAL stream.
    return 0;
}

// As the buffer gets filled in obtainBuffer, here we only simulate data consumption.
status_t AudioFlinger::RecordThread::PassthruPatchRecord::getNextBuffer(
        AudioBufferProvider::Buffer* buffer)
{
    buffer->frameCount = mLastReadFrames;
    buffer->raw = buffer->frameCount != 0 ? mStubBuffer.get() : nullptr;
    return NO_ERROR;
}

void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(
        AudioBufferProvider::Buffer* buffer)
{
    buffer->frameCount = 0;
    buffer->raw = nullptr;
}

// ----------------------------------------------------------------------------
#undef LOG_TAG
#define LOG_TAG "AF::MmapTrack"

AudioFlinger::MmapThread::MmapTrack::MmapTrack(ThreadBase *thread,
        const audio_attributes_t& attr,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        audio_session_t sessionId,
        bool isOut,
        const AttributionSourceState& attributionSource,
        pid_t creatorPid,
        audio_port_handle_t portId)
    :   TrackBase(thread, NULL, attr, sampleRate, format,
                  channelMask, (size_t)0 /* frameCount */,
                  nullptr /* buffer */, (size_t)0 /* bufferSize */,
                  sessionId, creatorPid,
                  VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
                  isOut,
                  ALLOC_NONE,
                  TYPE_DEFAULT, portId,
                  std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_MMAP) + std::to_string(portId)),
        mPid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.pid))),
        mSilenced(false), mSilencedNotified(false)
{
    // Once this item is logged by the server, the client can add properties.
    mTrackMetrics.logConstructor(creatorPid, uid(), id());
}

AudioFlinger::MmapThread::MmapTrack::~MmapTrack()
{
}

status_t AudioFlinger::MmapThread::MmapTrack::initCheck() const
{
    return NO_ERROR;
}

status_t AudioFlinger::MmapThread::MmapTrack::start(AudioSystem::sync_event_t event __unused,
                                                    audio_session_t triggerSession __unused)
{
    return NO_ERROR;
}

void AudioFlinger::MmapThread::MmapTrack::stop()
{
}
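// Note: the AudioBufferProvider / ExtendedAudioBufferProvider overrides below are
// intentional stubs. An mmap track exchanges audio directly through the memory-mapped
// buffer shared with the client, so no data is expected to flow through these interfaces.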
// AudioBufferProvider interface
status_t AudioFlinger::MmapThread::MmapTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
    buffer->frameCount = 0;
    buffer->raw = nullptr;
    return INVALID_OPERATION;
}

// ExtendedAudioBufferProvider interface
size_t AudioFlinger::MmapThread::MmapTrack::framesReady() const {
    return 0;
}

int64_t AudioFlinger::MmapThread::MmapTrack::framesReleased() const
{
    return 0;
}

void AudioFlinger::MmapThread::MmapTrack::onTimestamp(const ExtendedTimestamp &timestamp __unused)
{
}

void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
{
    result.appendFormat("Client Session Port Id Format Chn mask SRate Flags %s\n",
            isOut() ? "Usg CT" : "Source");
}

void AudioFlinger::MmapThread::MmapTrack::appendDump(String8& result, bool active __unused)
{
    result.appendFormat("%6u %7u %7u %08X %08X %6u 0x%03X ",
            mPid,
            mSessionId,
            mPortId,
            mFormat,
            mChannelMask,
            mSampleRate,
            mAttr.flags);
    if (isOut()) {
        result.appendFormat("%3x %2x", mAttr.usage, mAttr.content_type);
    } else {
        result.appendFormat("%6x", mAttr.source);
    }
    result.append("\n");
}
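
// An input-track row produced by appendDump() looks roughly like this
// (hypothetical values: pid 10212, session 42, port 129, 16-bit PCM, stereo-in mask,
// 48 kHz, no flags, source AUDIO_SOURCE_MIC):
//
//      10212      42     129 00000001 0000000C  48000 0x000      1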

} // namespace android