/*
 * Copyright 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "Planner"
// #define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include <android-base/properties.h>
#include <compositionengine/impl/planner/Flattener.h>
#include <compositionengine/impl/planner/LayerState.h>

#include <gui/TraceUtils.h>

using time_point = std::chrono::steady_clock::time_point;
using namespace std::chrono_literals;

namespace android::compositionengine::impl::planner {

namespace {

// Returns true if the underlying layer stacks are the same modulo state that is
// expected to differ between frames, such as the specific buffers; false otherwise.
bool isSameStack(const std::vector<const LayerState*>& incomingLayers,
                 const std::vector<CachedSet>& cachedSets) {
    std::vector<const LayerState*> existingLayers;
    for (auto& cachedSet : cachedSets) {
        for (auto& layer : cachedSet.getConstituentLayers()) {
            existingLayers.push_back(layer.getState());
        }
    }

    if (incomingLayers.size() != existingLayers.size()) {
        return false;
    }

    for (size_t i = 0; i < incomingLayers.size(); i++) {
        // Checking the IDs here is very strict, but we do this as otherwise we may mistakenly try
        // to access destroyed OutputLayers later on.
        if (incomingLayers[i]->getId() != existingLayers[i]->getId() ||
            incomingLayers[i]->getDifferingFields(*(existingLayers[i])) != LayerStateField::None) {
            return false;
        }
    }
    return true;
}

} // namespace
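
// A minimal construction sketch (illustrative only; assumes Tunables is the
// nested struct declared in Flattener.h, and the values below are hypothetical
// rather than defaults):
//
//     Flattener::Tunables tunables;
//     tunables.mActiveLayerTimeout = 150ms;  // hypothetical; needs chrono_literals
//     tunables.mEnableHolePunch = true;
//     Flattener flattener(renderEngine, tunables);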
Flattener::Flattener(renderengine::RenderEngine& renderEngine, const Tunables& tunables)
      : mRenderEngine(renderEngine), mTunables(tunables), mTexturePool(mRenderEngine) {}
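
// Flattens the incoming layer stack for one frame: invalidates all cached state
// when the hashed geometry (or the stricter isSameStack() comparison) says the
// stack changed, otherwise merges the incoming layers into the existing cached
// sets and, on subsequent frames with an unchanged stack, tries to build a new
// flattened cached set. Returns the hash describing the resulting layer stack.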
NonBufferHash Flattener::flattenLayers(const std::vector<const LayerState*>& layers,
                                       NonBufferHash hash, time_point now) {
    ATRACE_CALL();
    const size_t unflattenedDisplayCost = calculateDisplayCost(layers);
    mUnflattenedDisplayCost += unflattenedDisplayCost;

    // We invalidate the layer cache if:
    // 1. We're not tracking any layers, or
    // 2. The last seen hashed geometry changed between frames, or
    // 3. A stricter equality check demonstrates that the layer stack really did change, since the
    //    hashed geometry does not guarantee uniqueness.
    if (mCurrentGeometry != hash || (!mLayers.empty() && !isSameStack(layers, mLayers))) {
        resetActivities(hash, now);
        mFlattenedDisplayCost += unflattenedDisplayCost;
        return hash;
    }

    ++mInitialLayerCounts[layers.size()];

    // Only call buildCachedSets if these layers were already stored in mLayers.
    // Otherwise (i.e. when mergeWithCachedSets returns false), every cached set
    // was just created with the current timestamp, so buildCachedSets would
    // never find an inactive run.
    const bool alreadyHadCachedSets = mergeWithCachedSets(layers, now);

    ++mFinalLayerCounts[mLayers.size()];

    if (alreadyHadCachedSets) {
        buildCachedSets(now);
        hash = computeLayersHash();
    }

    return hash;
}
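
// Renders the in-progress cached set, if there is one, unless it already has a
// rendered buffer or rendering would likely overrun the next frame's deadline.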
void Flattener::renderCachedSets(
        const OutputCompositionState& outputState,
        std::optional<std::chrono::steady_clock::time_point> renderDeadline) {
    ATRACE_CALL();

    if (!mNewCachedSet) {
        return;
    }

    // Nothing to do if the cached set has already been rendered
    if (mNewCachedSet->hasRenderedBuffer()) {
        ATRACE_NAME("mNewCachedSet->hasRenderedBuffer()");
        return;
    }

    const auto now = std::chrono::steady_clock::now();

    // If we have a render deadline, and the flattener is configured to skip rendering if we don't
    // have enough time, then we skip rendering the cached set if we think that we'll steal too much
    // time from the next frame.
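    // For example (hypothetical numbers), with cachedSetRenderDuration = 3ms and
    // only 1ms left before the deadline, rendering is deferred; after
    // maxDeferRenderAttempts consecutive deferrals the set is rendered anyway.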
    if (renderDeadline && mTunables.mRenderScheduling) {
        if (const auto estimatedRenderFinish =
                    now + mTunables.mRenderScheduling->cachedSetRenderDuration;
            estimatedRenderFinish > *renderDeadline) {
            mNewCachedSet->incrementSkipCount();

            if (mNewCachedSet->getSkipCount() <=
                mTunables.mRenderScheduling->maxDeferRenderAttempts) {
                ATRACE_FORMAT("DeadlinePassed: exceeded deadline by: %lld us",
                              static_cast<long long>(
                                      std::chrono::duration_cast<std::chrono::microseconds>(
                                              estimatedRenderFinish - *renderDeadline)
                                              .count()));
                return;
            } else {
                ATRACE_NAME("DeadlinePassed: exceeded max skips");
            }
        }
    }

    mNewCachedSet->render(mRenderEngine, mTexturePool, outputState);
}

void Flattener::dumpLayers(std::string& result) const {
    result.append("  Current layers:");
    for (const CachedSet& layer : mLayers) {
        result.append("\n");
        layer.dump(result);
    }
}

void Flattener::dump(std::string& result) const {
    const auto now = std::chrono::steady_clock::now();

    base::StringAppendF(&result, "Flattener state:\n");

    result.append("\n  Statistics:\n");

    result.append("    Display cost (in screen-size buffers):\n");
    const size_t displayArea = static_cast<size_t>(mDisplaySize.width * mDisplaySize.height);
    base::StringAppendF(&result, "      Unflattened: %.2f\n",
                        static_cast<float>(mUnflattenedDisplayCost) / displayArea);
    base::StringAppendF(&result, "      Flattened:   %.2f\n",
                        static_cast<float>(mFlattenedDisplayCost) / displayArea);

    const auto compareLayerCounts = [](const std::pair<size_t, size_t>& left,
                                       const std::pair<size_t, size_t>& right) {
        return left.first < right.first;
    };

    const size_t maxLayerCount = mInitialLayerCounts.empty()
            ? 0u
            : std::max_element(mInitialLayerCounts.cbegin(), mInitialLayerCounts.cend(),
                               compareLayerCounts)
                      ->first;

    result.append("\n    Initial counts:\n");
    for (size_t count = 1; count <= maxLayerCount; ++count) {
        size_t initial = mInitialLayerCounts.count(count) > 0 ? mInitialLayerCounts.at(count) : 0;
        base::StringAppendF(&result, "      % 2zd: %zd\n", count, initial);
    }

    result.append("\n    Final counts:\n");
    for (size_t count = 1; count <= maxLayerCount; ++count) {
        size_t final = mFinalLayerCounts.count(count) > 0 ? mFinalLayerCounts.at(count) : 0;
        base::StringAppendF(&result, "      % 2zd: %zd\n", count, final);
    }

    base::StringAppendF(&result, "\n    Cached sets created: %zd\n", mCachedSetCreationCount);
    base::StringAppendF(&result, "    Cost: %.2f\n",
                        static_cast<float>(mCachedSetCreationCost) / displayArea);

    const auto lastUpdate =
            std::chrono::duration_cast<std::chrono::milliseconds>(now - mLastGeometryUpdate);
    base::StringAppendF(&result, "\n  Current hash %016zx, last update %sago\n\n", mCurrentGeometry,
                        durationString(lastUpdate).c_str());

    dumpLayers(result);

    base::StringAppendF(&result, "\n");
    mTexturePool.dump(result);
}
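
// Cost model: every input layer is read once, regardless of composition type,
// so each layer contributes the area of its display frame. Client composition
// additionally writes and reads the client target once each, so the covered
// bounds are paid twice more. Illustrative example: two full-screen layers on a
// 1080x1920 display, one of them client-composited, cost 2 * 1080 * 1920 for
// the layer reads plus 2 * 1080 * 1920 for the client target, i.e. four
// screen-size buffers.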
size_t Flattener::calculateDisplayCost(const std::vector<const LayerState*>& layers) const {
    Region coveredRegion;
    size_t displayCost = 0;
    bool hasClientComposition = false;

    for (const LayerState* layer : layers) {
        coveredRegion.orSelf(layer->getDisplayFrame());

        // Regardless of composition type, we always have to read each input once
        displayCost += static_cast<size_t>(layer->getDisplayFrame().width() *
                                           layer->getDisplayFrame().height());

        hasClientComposition |= layer->getCompositionType() == hal::Composition::CLIENT;
    }

    if (hasClientComposition) {
        // If there is client composition, the client target buffer has to be both written by the
        // GPU and read by the DPU, so we pay its cost twice
        displayCost += 2 *
                static_cast<size_t>(coveredRegion.bounds().width() *
                                    coveredRegion.bounds().height());
    }

    return displayCost;
}

void Flattener::resetActivities(NonBufferHash hash, time_point now) {
    ALOGV("[%s]", __func__);

    mCurrentGeometry = hash;
    mLastGeometryUpdate = now;

    for (const CachedSet& cachedSet : mLayers) {
        if (cachedSet.getLayerCount() > 1) {
            ++mInvalidatedCachedSetAges[cachedSet.getAge()];
        }
    }

    mLayers.clear();

    if (mNewCachedSet) {
        ++mInvalidatedCachedSetAges[mNewCachedSet->getAge()];
        mNewCachedSet = std::nullopt;
    }
}
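
// Combines the non-buffer hashes of all cached sets in order, so the resulting
// hash is sensitive to both layer content and stacking order.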
NonBufferHash Flattener::computeLayersHash() const {
    size_t hash = 0;
    for (const auto& layer : mLayers) {
        android::hashCombineSingleHashed(hash, layer.getNonBufferHash());
    }
    return hash;
}

// Only called if the geometry matches the last frame. Returns true if mLayers
// was already populated with these layers, i.e. on the second and following
// calls with the same geometry.
bool Flattener::mergeWithCachedSets(const std::vector<const LayerState*>& layers, time_point now) {
    ATRACE_CALL();
    std::vector<CachedSet> merged;

    if (mLayers.empty()) {
        merged.reserve(layers.size());
        for (const LayerState* layer : layers) {
            merged.emplace_back(layer, now);
            mFlattenedDisplayCost += merged.back().getDisplayCost();
        }
        mLayers = std::move(merged);
        return false;
    }

    // the compiler should strip out the following no-op loops when ALOGV is off
    ALOGV("[%s] Incoming layers:", __func__);
    for (const LayerState* layer : layers) {
        ALOGV("%s", layer->getName().c_str());
    }

    ALOGV("[%s] Current layers:", __func__);
    for (const CachedSet& layer : mLayers) {
        const auto dumper = [&] {
            std::string dump;
            layer.dump(dump);
            return dump;
        };
        ALOGV("%s", dumper().c_str());
    }

    auto currentLayerIter = mLayers.begin();
    auto incomingLayerIter = layers.begin();

    // If not null, this represents the layer that is blurring the layer before
    // currentLayerIter. The blurring was stored in the override buffer, so the
    // layer that requests the blur no longer needs to do any blurring.
    compositionengine::OutputLayer* priorBlurLayer = nullptr;
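
    // Walk the incoming and cached lists in lockstep. Each cached set is either
    // superseded by the in-progress cached set (once its buffer is ready), reused
    // as-is (no buffer update), decomposed back into its constituent layers
    // (multi-layer set whose contents changed), or refreshed in place.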
    while (incomingLayerIter != layers.end()) {
        if (mNewCachedSet &&
            mNewCachedSet->getFirstLayer().getState()->getId() == (*incomingLayerIter)->getId()) {
            if (mNewCachedSet->hasBufferUpdate()) {
                ALOGV("[%s] Dropping new cached set", __func__);
                ++mInvalidatedCachedSetAges[0];
                mNewCachedSet = std::nullopt;
            } else if (mNewCachedSet->hasReadyBuffer()) {
                ALOGV("[%s] Found ready buffer", __func__);
                size_t skipCount = mNewCachedSet->getLayerCount();
                while (skipCount != 0) {
                    auto* peekThroughLayer = mNewCachedSet->getHolePunchLayer();
                    const size_t layerCount = currentLayerIter->getLayerCount();
                    for (size_t i = 0; i < layerCount; ++i) {
                        bool disableBlur = priorBlurLayer &&
                                priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                        OutputLayer::CompositionState& state =
                                (*incomingLayerIter)->getOutputLayer()->editState();
                        state.overrideInfo = {
                                .buffer = mNewCachedSet->getBuffer(),
                                .acquireFence = mNewCachedSet->getDrawFence(),
                                .displayFrame = mNewCachedSet->getTextureBounds(),
                                .dataspace = mNewCachedSet->getOutputDataspace(),
                                .displaySpace = mNewCachedSet->getOutputSpace(),
                                .damageRegion = Region::INVALID_REGION,
                                .visibleRegion = mNewCachedSet->getVisibleRegion(),
                                .peekThroughLayer = peekThroughLayer,
                                .disableBackgroundBlur = disableBlur,
                        };
                        ++incomingLayerIter;
                    }

                    if (currentLayerIter->getLayerCount() > 1) {
                        ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
                    }
                    ++currentLayerIter;

                    skipCount -= layerCount;
                }
                priorBlurLayer = mNewCachedSet->getBlurLayer();
                merged.emplace_back(std::move(*mNewCachedSet));
                mNewCachedSet = std::nullopt;
                continue;
            }
        }

        if (!currentLayerIter->hasBufferUpdate()) {
            currentLayerIter->incrementAge();
            merged.emplace_back(*currentLayerIter);

            // Skip the incoming layers corresponding to this valid current layer
            const size_t layerCount = currentLayerIter->getLayerCount();
            auto* peekThroughLayer = currentLayerIter->getHolePunchLayer();
            for (size_t i = 0; i < layerCount; ++i) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo = {
                        .buffer = currentLayerIter->getBuffer(),
                        .acquireFence = currentLayerIter->getDrawFence(),
                        .displayFrame = currentLayerIter->getTextureBounds(),
                        .dataspace = currentLayerIter->getOutputDataspace(),
                        .displaySpace = currentLayerIter->getOutputSpace(),
                        .damageRegion = Region(),
                        .visibleRegion = currentLayerIter->getVisibleRegion(),
                        .peekThroughLayer = peekThroughLayer,
                        .disableBackgroundBlur = disableBlur,
                };
                ++incomingLayerIter;
            }
        } else if (currentLayerIter->getLayerCount() > 1) {
            // Break the current layer into its constituent layers
            ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
            for (CachedSet& layer : currentLayerIter->decompose()) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo.disableBackgroundBlur = disableBlur;
                layer.updateAge(now);
                merged.emplace_back(layer);
                ++incomingLayerIter;
            }
        } else {
            bool disableBlur =
                    priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
            OutputLayer::CompositionState& state =
                    (*incomingLayerIter)->getOutputLayer()->editState();
            state.overrideInfo.disableBackgroundBlur = disableBlur;
            currentLayerIter->updateAge(now);
            merged.emplace_back(*currentLayerIter);
            ++incomingLayerIter;
        }
        priorBlurLayer = currentLayerIter->getBlurLayer();
        ++currentLayerIter;
    }

    for (const CachedSet& layer : merged) {
        mFlattenedDisplayCost += layer.getDisplayCost();
    }

    mLayers = std::move(merged);
    return true;
}
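
// A run is a sequence of adjacent cached sets that have all been inactive for at
// least mActiveLayerTimeout and can therefore be flattened into a single buffer.
// Runs must start with a buffer layer, layers with unsupported dataspaces are
// excluded, and a layer with background blur may only extend a run that began at
// the bottom of the stack.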
std::vector<Flattener::Run> Flattener::findCandidateRuns(time_point now) const {
    ATRACE_CALL();
    std::vector<Run> runs;
    bool isPartOfRun = false;
    Run::Builder builder;
    bool firstLayer = true;
    bool runHasFirstLayer = false;

    for (auto currentSet = mLayers.cbegin(); currentSet != mLayers.cend(); ++currentSet) {
        const bool layerIsInactive =
                now - currentSet->getLastUpdate() > mTunables.mActiveLayerTimeout;
        const bool layerHasBlur = currentSet->hasBlurBehind();

        if (layerIsInactive && (firstLayer || runHasFirstLayer || !layerHasBlur) &&
            !currentSet->hasUnsupportedDataspace()) {
            if (isPartOfRun) {
                builder.append(currentSet->getLayerCount());
            } else {
                // Runs can't start with a non-buffer layer
                if (currentSet->getFirstLayer().getBuffer() == nullptr) {
                    ALOGV("[%s] Skipping initial non-buffer layer", __func__);
                } else {
                    builder.init(currentSet);
                    if (firstLayer) {
                        runHasFirstLayer = true;
                    }
                    isPartOfRun = true;
                }
            }
        } else if (isPartOfRun) {
            builder.setHolePunchCandidate(&(*currentSet));

            // If we're here then this blur layer recently had an active buffer updating, meaning
            // that there is exactly one layer. Blur radius currently is part of layer stack
            // geometry, so we're also guaranteed that the background blur radius hasn't changed for
            // at least as long as this new inactive cached set.
            if (runHasFirstLayer && layerHasBlur &&
                currentSet->getFirstLayer().getBackgroundBlurRadius() > 0) {
                builder.setBlurringLayer(&(*currentSet));
            }
            if (auto run = builder.validateAndBuild(); run) {
                runs.push_back(*run);
            }

            runHasFirstLayer = false;
            builder.reset();
            isPartOfRun = false;
        }

        firstLayer = false;
    }

    // If we're in the middle of a run at the end, we still need to validate and build it.
    if (isPartOfRun) {
        if (auto run = builder.validateAndBuild(); run) {
            runs.push_back(*run);
        }
    }

    ALOGV("[%s] Found %zu candidate runs", __func__, runs.size());

    return runs;
}

std::optional<Flattener::Run> Flattener::findBestRun(std::vector<Flattener::Run>& runs) const {
    if (runs.empty()) {
        return std::nullopt;
    }

    // TODO (b/181192467): Choose the best run, instead of just the first.
    return runs[0];
}
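
// Builds mNewCachedSet from the best candidate run. The set is not rendered
// here; renderCachedSets() renders it later, deadline permitting. Note that any
// protected layer currently disables flattening for the entire stack (see the
// TODO below).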
void Flattener::buildCachedSets(time_point now) {
    ATRACE_CALL();
    if (mLayers.empty()) {
        ALOGV("[%s] No layers found, returning", __func__);
        return;
    }

    // Don't try to build a new cached set if we already have a new one in progress
    if (mNewCachedSet) {
        return;
    }

    for (const CachedSet& layer : mLayers) {
        // TODO (b/191997217): make it less aggressive, and sync with findCandidateRuns
        if (layer.hasProtectedLayers()) {
            ATRACE_NAME("layer->hasProtectedLayers()");
            return;
        }
    }

    std::vector<Run> runs = findCandidateRuns(now);

    std::optional<Run> bestRun = findBestRun(runs);

    if (!bestRun) {
        return;
    }

    mNewCachedSet.emplace(*bestRun->getStart());
    mNewCachedSet->setLastUpdate(now);
    auto currentSet = bestRun->getStart();
    while (mNewCachedSet->getLayerCount() < bestRun->getLayerLength()) {
        ++currentSet;
        mNewCachedSet->append(*currentSet);
    }

    if (bestRun->getBlurringLayer()) {
        mNewCachedSet->addBackgroundBlurLayer(*bestRun->getBlurringLayer());
    }

    if (mTunables.mEnableHolePunch && bestRun->getHolePunchCandidate() &&
        bestRun->getHolePunchCandidate()->requiresHolePunch()) {
        // Add the pip layer to mNewCachedSet, but in a special way - it should
        // replace the buffer with a clear round rect.
        mNewCachedSet->addHolePunchLayerIfFeasible(*bestRun->getHolePunchCandidate(),
                                                   bestRun->getStart() == mLayers.cbegin());
    }

    // TODO(b/181192467): Actually compute new LayerState vector and corresponding hash for each run
    // and feed it back into the predictor

    ++mCachedSetCreationCount;
    mCachedSetCreationCost += mNewCachedSet->getCreationCost();

    // Note: the compiler should strip the following no-op statements when ALOGV is off
    const auto dumper = [&] {
        std::string setDump;
        mNewCachedSet->dump(setDump);
        return setDump;
    };
    ALOGV("[%s] Added new cached set:\n%s", __func__, dumper().c_str());
}

} // namespace android::compositionengine::impl::planner