/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AudioStreamRecord"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>

#include <aaudio/AAudio.h>
#include <audio_utils/primitives.h>
#include <media/AidlConversion.h>
#include <media/AudioRecord.h>
#include <utils/String16.h>

#include "core/AudioGlobal.h"
#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamRecord.h"
#include "utility/AudioClock.h"
#include "utility/FixedBlockWriter.h"

using android::content::AttributionSourceState;

using namespace android;
using namespace aaudio;

AudioStreamRecord::AudioStreamRecord()
    : AudioStreamLegacy()
    , mFixedBlockWriter(*this)
{
}

AudioStreamRecord::~AudioStreamRecord()
{
    const aaudio_stream_state_t state = getState();
    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
    ALOGE_IF(bad, "stream not closed, in state %d", state);
}

aaudio_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
{
    aaudio_result_t result = AAUDIO_OK;

    result = AudioStream::open(builder);
    if (result != AAUDIO_OK) {
        return result;
    }

    // Try to create an AudioRecord

    const aaudio_session_id_t requestedSessionId = builder.getSessionId();
    const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);

    // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
    audio_channel_mask_t channelMask =
            AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), true /*isInput*/);

    size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
                                                      : builder.getBufferCapacity();

    audio_input_flags_t flags;
    aaudio_performance_mode_t perfMode = getPerformanceMode();
    switch (perfMode) {
        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
            // If the app asks for a sessionId then it means they want to use effects.
            // So don't use RAW flag.
            flags = (audio_input_flags_t) ((requestedSessionId == AAUDIO_SESSION_ID_NONE)
                    ? (AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW)
                    : (AUDIO_INPUT_FLAG_FAST));
            break;

        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
        case AAUDIO_PERFORMANCE_MODE_NONE:
        default:
            flags = AUDIO_INPUT_FLAG_NONE;
            break;
    }

    const audio_format_t requestedFormat = getFormat();
    // Preserve behavior of API 26
    if (requestedFormat == AUDIO_FORMAT_DEFAULT) {
        setFormat(AUDIO_FORMAT_PCM_FLOAT);
    }

    // Maybe change device format to get a FAST path.
    // AudioRecord does not support FAST mode for FLOAT data.
    // TODO AudioRecord should allow FLOAT data paths for FAST tracks.
    // So IF the user asks for low latency FLOAT
    // AND the sampleRate is likely to be compatible with FAST
    // THEN request I16 and convert to FLOAT when passing to user.
    // Note that hard coding 48000 Hz is not ideal because the sampleRate
    // for a FAST path might not be 48000 Hz.
    // It normally is but there is a chance that it is not.
    // And there is no reliable way to know that in advance.
    // Luckily the consequences of a wrong guess are minor.
    // We just may not get a FAST track.
    // But we wouldn't have anyway without this hack.
    constexpr int32_t kMostLikelySampleRateForFast = 48000;
    if (getFormat() == AUDIO_FORMAT_PCM_FLOAT
            && perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
            && (audio_channel_count_from_in_mask(channelMask) <= 2) // FAST only for mono and stereo
            && (getSampleRate() == kMostLikelySampleRateForFast
                || getSampleRate() == AAUDIO_UNSPECIFIED)) {
        setDeviceFormat(AUDIO_FORMAT_PCM_16_BIT);
    } else {
        setDeviceFormat(getFormat());
    }

    // To avoid glitching, let AudioFlinger pick the optimal burst size.
    uint32_t notificationFrames = 0;

    // Setup the callback if there is one.
    AudioRecord::callback_t callback = nullptr;
    void *callbackData = nullptr;
    AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
    if (builder.getDataCallbackProc() != nullptr) {
        streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
        callback = getLegacyCallback();
        callbackData = this;
    }
    mCallbackBufferSize = builder.getFramesPerDataCallback();

    // Don't call mAudioRecord->setInputDevice() because it will be overwritten by set()!
    audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
                                           ? AUDIO_PORT_HANDLE_NONE
                                           : getDeviceId();

    const audio_content_type_t contentType =
            AAudioConvert_contentTypeToInternal(builder.getContentType());
    const audio_source_t source =
            AAudioConvert_inputPresetToAudioSource(builder.getInputPreset());

    const audio_flags_mask_t attrFlags =
            AAudioConvert_privacySensitiveToAudioFlagsMask(builder.isPrivacySensitive());
    const audio_attributes_t attributes = {
            .content_type = contentType,
            .usage = AUDIO_USAGE_UNKNOWN, // only used for output
            .source = source,
            .flags = attrFlags, // Different than the AUDIO_INPUT_FLAGS
            .tags = ""
    };

    // TODO b/182392769: use attribution source util
    AttributionSourceState attributionSource;
    attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
    attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
    attributionSource.packageName = builder.getOpPackageName();
    attributionSource.attributionTag = builder.getAttributionTag();
    attributionSource.token = sp<BBinder>::make();

    // ----------- open the AudioRecord ---------------------
    // Might retry, but never more than once.
    for (int i = 0; i < 2; i ++) {
        const audio_format_t requestedInternalFormat = getDeviceFormat();

        mAudioRecord = new AudioRecord(
                attributionSource
        );
        mAudioRecord->set(
                AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
                getSampleRate(),
                requestedInternalFormat,
                channelMask,
                frameCount,
                callback,
                callbackData,
                notificationFrames,
                false /*threadCanCallJava*/,
                sessionId,
                streamTransferType,
                flags,
                AUDIO_UID_INVALID, // DEFAULT uid
                -1,                // DEFAULT pid
                &attributes,
                selectedDeviceId
        );

        // Set it here so it can be logged by the destructor if the open failed.
        mAudioRecord->setCallerName(kCallerName);

        // Did we get a valid track?
        status_t status = mAudioRecord->initCheck();
        if (status != OK) {
            safeReleaseClose();
            ALOGE("open(), initCheck() returned %d", status);
            return AAudioConvert_androidToAAudioResult(status);
        }

        // Check to see if it was worth hacking the deviceFormat.
        bool gotFastPath = (mAudioRecord->getFlags() & AUDIO_INPUT_FLAG_FAST)
                           == AUDIO_INPUT_FLAG_FAST;
        if (getFormat() != getDeviceFormat() && !gotFastPath) {
            // We tried to get a FAST path by switching the device format.
            // But it didn't work. So we might as well reopen using the same
            // format for device and for app.
            ALOGD("%s() used a different device format but no FAST path, reopen", __func__);
            mAudioRecord.clear();
            setDeviceFormat(getFormat());
        } else {
            break; // Keep the one we just opened.
        }
    }

    mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD)
            + std::to_string(mAudioRecord->getPortId());
    android::mediametrics::LogItem(mMetricsId)
            .set(AMEDIAMETRICS_PROP_PERFORMANCEMODE,
                 AudioGlobal_convertPerformanceModeToText(builder.getPerformanceMode()))
            .set(AMEDIAMETRICS_PROP_SHARINGMODE,
                 AudioGlobal_convertSharingModeToText(builder.getSharingMode()))
            .set(AMEDIAMETRICS_PROP_ENCODINGCLIENT, toString(requestedFormat).c_str()).record();

    // Get the actual values from the AudioRecord.
    setChannelMask(AAudioConvert_androidToAAudioChannelMask(
            mAudioRecord->channelMask(), true /*isInput*/,
            AAudio_isChannelIndexMask(getChannelMask())));
    setSampleRate(mAudioRecord->getSampleRate());
    setBufferCapacity(getBufferCapacityFromDevice());
    setFramesPerBurst(getFramesPerBurstFromDevice());

    // We may need to pass the data through a block size adapter to guarantee constant size.
    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
        // The block adapter runs before the format conversion.
        // So we need to use the device frame size.
        mBlockAdapterBytesPerFrame = getBytesPerDeviceFrame();
        int callbackSizeBytes = mBlockAdapterBytesPerFrame * mCallbackBufferSize;
        mFixedBlockWriter.open(callbackSizeBytes);
        mBlockAdapter = &mFixedBlockWriter;
    } else {
        mBlockAdapter = nullptr;
    }

    // Allocate format conversion buffer if needed.
    if (getDeviceFormat() == AUDIO_FORMAT_PCM_16_BIT
            && getFormat() == AUDIO_FORMAT_PCM_FLOAT) {

        if (builder.getDataCallbackProc() != nullptr) {
            // If we have a callback then we need to convert the data into an internal float
            // array and then pass that entire array to the app.
            mFormatConversionBufferSizeInFrames =
                    (mCallbackBufferSize != AAUDIO_UNSPECIFIED)
                    ? mCallbackBufferSize : getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferFloat = std::make_unique<float[]>(numSamples);
        } else {
            // If we don't have a callback then we will read into an internal short array
            // and then convert into the app float array in read().
            mFormatConversionBufferSizeInFrames = getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferI16 = std::make_unique<int16_t[]>(numSamples);
        }
        ALOGD("%s() setup I16>FLOAT conversion buffer with %d frames",
              __func__, mFormatConversionBufferSizeInFrames);
    }

    // Update performance mode based on the actual stream.
    // For example, if the sample rate does not match native then you won't get a FAST track.
    audio_input_flags_t actualFlags = mAudioRecord->getFlags();
    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
    // FIXME Some platforms do not advertise RAW mode for low latency inputs.
    if ((actualFlags & (AUDIO_INPUT_FLAG_FAST))
            == (AUDIO_INPUT_FLAG_FAST)) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
    }
    setPerformanceMode(actualPerformanceMode);

    setSharingMode(AAUDIO_SHARING_MODE_SHARED); // EXCLUSIVE mode not supported in legacy

    // Log warning if we did not get what we asked for.
    ALOGW_IF(actualFlags != flags,
             "open() flags changed from 0x%08X to 0x%08X",
             flags, actualFlags);
    ALOGW_IF(actualPerformanceMode != perfMode,
             "open() perfMode changed from %d to %d",
             perfMode, actualPerformanceMode);

    setState(AAUDIO_STREAM_STATE_OPEN);
    setDeviceId(mAudioRecord->getRoutedDeviceId());

    aaudio_session_id_t actualSessionId =
            (requestedSessionId == AAUDIO_SESSION_ID_NONE)
            ? AAUDIO_SESSION_ID_NONE
            : (aaudio_session_id_t) mAudioRecord->getSessionId();
    setSessionId(actualSessionId);

    mAudioRecord->addAudioDeviceCallback(this);

    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::release_l() {
    // TODO add close() or release() to AudioFlinger's AudioRecord API.
    //  Then call it from here
    if (getState() != AAUDIO_STREAM_STATE_CLOSING) {
        mAudioRecord->removeAudioDeviceCallback(this);
        logReleaseBufferState();
        // Data callbacks may still be running!
        return AudioStream::release_l();
    } else {
        return AAUDIO_OK; // already released
    }
}

void AudioStreamRecord::close_l() {
    // The callbacks are normally joined in the AudioRecord destructor.
    // But if another object has a reference to the AudioRecord then
    // it will not get deleted here.
    // So we should join callbacks explicitly before returning.
    // Unlock around the join to avoid deadlocks if the callback tries to lock.
    // This can happen if the callback returns AAUDIO_CALLBACK_RESULT_STOP
    mStreamLock.unlock();
    mAudioRecord->stopAndJoinCallbacks();
    mStreamLock.lock();

    mAudioRecord.clear();
    // Do not close mFixedBlockWriter. It has a unique_ptr to its buffer
    // so it will clean up by itself.
    AudioStream::close_l();
}

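// If an I16->FLOAT conversion buffer was allocated in open(), convert the incoming device
// data here so the data callback always delivers samples in the app's FLOAT format.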
const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
    if (mFormatConversionBufferFloat.get() != nullptr) {
        LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
                            "%s() conversion size %d too large for buffer %d",
                            __func__, numFrames, mFormatConversionBufferSizeInFrames);

        int32_t numSamples = numFrames * getSamplesPerFrame();
        // Only conversion supported is I16 to FLOAT
        memcpy_to_float_from_i16(
                mFormatConversionBufferFloat.get(),
                (const int16_t *) audioData,
                numSamples);
        return mFormatConversionBufferFloat.get();
    } else {
        return audioData;
    }
}

void AudioStreamRecord::processCallback(int event, void *info) {
    switch (event) {
        case AudioRecord::EVENT_MORE_DATA:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
            break;

        // Stream got rerouted so we disconnect.
        case AudioRecord::EVENT_NEW_IAUDIORECORD:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
            break;

        default:
            break;
    }
    return;
}

aaudio_result_t AudioStreamRecord::requestStart_l()
{
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Enable callback before starting AudioRecord to avoid shutting
    // down because of a race condition.
    mCallbackEnabled.store(true);
    aaudio_stream_state_t originalState = getState();
    // Set before starting the callback so that we are in the correct state
    // before updateStateMachine() can be called by the callback.
    setState(AAUDIO_STREAM_STATE_STARTING);
    mFramesWritten.reset32(); // service writes frames
    mTimestampPosition.reset32();
    status_t err = mAudioRecord->start(); // resets position to zero
    if (err != OK) {
        mCallbackEnabled.store(false);
        setState(originalState);
        return AAudioConvert_androidToAAudioResult(err);
    }
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::requestStop_l() {
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    setState(AAUDIO_STREAM_STATE_STOPPING);
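    // No more frames will be captured, so bring the write and timestamp positions
    // up to the current read position.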
    mFramesWritten.catchUpTo(getFramesRead());
    mTimestampPosition.catchUpTo(getFramesRead());
    mAudioRecord->stop();
    mCallbackEnabled.store(false);
    // Pass false to prevent errorCallback from being called after disconnect
    // when app has already requested a stop().
    return checkForDisconnectRequest(false);
}

aaudio_result_t AudioStreamRecord::updateStateMachine()
{
    aaudio_result_t result = AAUDIO_OK;
    aaudio_wrapping_frames_t position;
    status_t err;
    switch (getState()) {
    // TODO add better state visibility to AudioRecord
        case AAUDIO_STREAM_STATE_STARTING:
            // When starting, the position will begin at zero and then go positive.
            // The position can wrap but by that time the state will not be STARTING.
            err = mAudioRecord->getPosition(&position);
            if (err != OK) {
                result = AAudioConvert_androidToAAudioResult(err);
            } else if (position > 0) {
                setState(AAUDIO_STREAM_STATE_STARTED);
            }
            break;
        case AAUDIO_STREAM_STATE_STOPPING:
            if (mAudioRecord->stopped()) {
                setState(AAUDIO_STREAM_STATE_STOPPED);
            }
            break;
        default:
            break;
    }
    return result;
}

aaudio_result_t AudioStreamRecord::read(void *buffer,
                                        int32_t numFrames,
                                        int64_t timeoutNanoseconds)
{
    int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
    int32_t numBytes;
    // This will detect out of range values for numFrames.
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }

    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
        return AAUDIO_ERROR_DISCONNECTED;
    }

    // TODO add timeout to AudioRecord
    bool blocking = (timeoutNanoseconds > 0);

    ssize_t bytesActuallyRead = 0;
    ssize_t totalBytesRead = 0;
    if (mFormatConversionBufferI16.get() != nullptr) {
        // Convert I16 data to float using an intermediate buffer.
        float *floatBuffer = (float *) buffer;
        int32_t framesLeft = numFrames;
        // Perform conversion using multiple read()s if necessary.
        while (framesLeft > 0) {
            // Read into short internal buffer.
            int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
            size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
            bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(),
                                                   bytesToRead, blocking);
            if (bytesActuallyRead <= 0) {
                break;
            }
            totalBytesRead += bytesActuallyRead;
            int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
            // Convert into app float buffer.
            size_t numSamples = framesToConvert * getSamplesPerFrame();
            memcpy_to_float_from_i16(
                    floatBuffer,
                    mFormatConversionBufferI16.get(),
                    numSamples);
            floatBuffer += numSamples;
            framesLeft -= framesToConvert;
        }
    } else {
        bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
        totalBytesRead = bytesActuallyRead;
    }
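    // WOULD_BLOCK from a non-blocking read means no data was available yet; report zero frames.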
    if (bytesActuallyRead == WOULD_BLOCK) {
        return 0;
    } else if (bytesActuallyRead < 0) {
        // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioRecord invalidation.
        if (bytesActuallyRead == DEAD_OBJECT) {
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
    }
    int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
    incrementFramesRead(framesRead);

    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }

    return (aaudio_result_t) framesRead;
}

aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
{
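    // The requested size is ignored on this legacy path; just report the current size.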
    return getBufferSize();
}

int32_t AudioStreamRecord::getBufferSize() const
{
    return getBufferCapacity(); // TODO implement in AudioRecord?
}

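// The capacity is the total frame count of the underlying AudioRecord buffer.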
int32_t AudioStreamRecord::getBufferCapacityFromDevice() const
{
    return static_cast<int32_t>(mAudioRecord->frameCount());
}

int32_t AudioStreamRecord::getXRunCount() const
{
    return 0; // TODO implement when AudioRecord supports it
}

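// AudioRecord delivers data in notification periods; report that period as the burst size.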
int32_t AudioStreamRecord::getFramesPerBurstFromDevice() const {
    return static_cast<int32_t>(mAudioRecord->getNotificationPeriodInFrames());
}

aaudio_result_t AudioStreamRecord::getTimestamp(clockid_t clockId,
                                                int64_t *framePosition,
                                                int64_t *timeNanoseconds) {
    ExtendedTimestamp extendedTimestamp;
    if (getState() != AAUDIO_STREAM_STATE_STARTED) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    status_t status = mAudioRecord->getTimestamp(&extendedTimestamp);
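    // WOULD_BLOCK means a timestamp is not available yet, e.g. shortly after starting.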
    if (status == WOULD_BLOCK) {
        return AAUDIO_ERROR_INVALID_STATE;
    } else if (status != NO_ERROR) {
        return AAudioConvert_androidToAAudioResult(status);
    }
    return getBestTimestamp(clockId, framePosition, timeNanoseconds, &extendedTimestamp);
}

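// The app does not write to a capture stream; frames are written by the capture service.
// Fold AudioRecord's wrapping 32-bit position into the monotonic 64-bit frame counter.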
int64_t AudioStreamRecord::getFramesWritten() {
    aaudio_wrapping_frames_t position;
    status_t result;
    switch (getState()) {
        case AAUDIO_STREAM_STATE_STARTING:
        case AAUDIO_STREAM_STATE_STARTED:
            result = mAudioRecord->getPosition(&position);
            if (result == OK) {
                mFramesWritten.update32(position);
            }
            break;
        case AAUDIO_STREAM_STATE_STOPPING:
        default:
            break;
    }
    return AudioStreamLegacy::getFramesWritten();
}