/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <ftw.h>
#include <gtest/gtest.h>
#include <unistd.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "AndroidVersionUtil.h"
#include "GeneratedTestUtils.h"
#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
#include "TestUtils.h"

// Systrace is not available from CTS tests due to platform layering
// constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, as that should also be
// the case for CTS (public APIs only).
#ifndef NNTEST_ONLY_PUBLIC_API
#include <Tracing.h>
#else
#define NNTRACE_FULL_RAW(...)
#define NNTRACE_APP(...)
#define NNTRACE_APP_SWITCH(...)
#endif

#ifdef NNTEST_CTS
#define NNTEST_COMPUTE_MODE
#endif

namespace android::nn::generated_tests {
using namespace test_wrapper;
using namespace test_helper;

class GeneratedTests : public GeneratedTestBase {
   protected:
    void SetUp() override;
    void TearDown() override;

    bool shouldSkipTest();

    std::optional<Compilation> compileModel(const Model& model);
    void executeInternal(const Compilation& compilation, const TestModel& testModel,
                         bool testReusableExecution);
    void executeWithCompilation(const Compilation& compilation, const TestModel& testModel);
    void executeOnce(const Model& model, const TestModel& testModel);
    void executeMultithreadedOwnCompilation(const Model& model, const TestModel& testModel);
    void executeMultithreadedSharedCompilation(const Model& model, const TestModel& testModel);
    // Test driver for those generated from ml/nn/runtime/test/spec
    void execute(const TestModel& testModel);

    // VNDK version of the device under test.
    static int mVndkVersion;

    std::string mCacheDir;
    std::vector<uint8_t> mToken;
    bool mTestCompilationCaching = false;
    bool mTestDynamicOutputShape = false;
    bool mExpectFailure = false;
    bool mTestQuantizationCoupling = false;
    bool mTestDeviceMemory = false;
    bool mTestReusableExecution = true;
    Execution::ComputeMode mComputeMode = Execution::getComputeMode();
};

int GeneratedTests::mVndkVersion = __ANDROID_API_FUTURE__;

// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTests {
   protected:
    DynamicOutputShapeTest() { mTestDynamicOutputShape = true; }
};

// Tag for the fenced execute tests
class FencedComputeTest : public GeneratedTests {};

// Tag for the generated validation tests
class GeneratedValidationTests : public GeneratedTests {
   protected:
    GeneratedValidationTests() { mExpectFailure = true; }
};

class QuantizationCouplingTest : public GeneratedTests {
   protected:
    QuantizationCouplingTest() {
        mTestQuantizationCoupling = true;
        // QuantizationCouplingTest is intended to verify that if a driver supports ASYMM quant8,
        // it must also support SYMM quant8. All models in QuantizationCouplingTest are also
        // executed in other test suites, so there is no need to test reusable execution again.
        mTestReusableExecution = false;
    }
};

class DeviceMemoryTest : public GeneratedTests {
   protected:
    DeviceMemoryTest() { mTestDeviceMemory = true; }
};

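// Compiles the given model. When compilation caching is being tested, the model is compiled twice
// with the same cache directory and token, so the second compilation may hit the cache if the
// driver supports it. Returns std::nullopt if a model that is expected to fail does not compile.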
std::optional<Compilation> GeneratedTests::compileModel(const Model& model) {
    NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");
    if (mTestCompilationCaching) {
        // Compile the model twice with the same token, so that compilation caching will be
        // exercised if supported by the driver.
        // No invalid model will be passed to this branch.
        EXPECT_FALSE(mExpectFailure);
        Compilation compilation1(&model);
        EXPECT_EQ(compilation1.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        EXPECT_EQ(compilation1.finish(), Result::NO_ERROR);
        Compilation compilation2(&model);
        EXPECT_EQ(compilation2.setCaching(mCacheDir, mToken), Result::NO_ERROR);
        EXPECT_EQ(compilation2.finish(), Result::NO_ERROR);
        return compilation2;
    } else {
        Compilation compilation(&model);
        Result result = compilation.finish();

        // For a valid model, check that the compilation result is NO_ERROR.
        // For an invalid model, the driver may fail at compilation or execution, so any result
        // code is permitted at this point.
        if (mExpectFailure && result != Result::NO_ERROR) return std::nullopt;
        EXPECT_EQ(result, Result::NO_ERROR);
        return compilation;
    }
}

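// Creates a driver-allocated (device) memory for the given input of the compilation, by
// describing the memory with a single input role and letting the runtime deduce its dimensions.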
static ANeuralNetworksMemory* createDeviceMemoryForInput(const Compilation& compilation,
                                                         uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, compilation.getHandle(), index, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

static ANeuralNetworksMemory* createDeviceMemoryForOutput(const Compilation& compilation,
                                                          uint32_t index) {
    ANeuralNetworksMemoryDesc* desc = nullptr;
    EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation.getHandle(), index, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemory* memory = nullptr;
    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemoryDesc_free(desc);
    return memory;
}

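// Sets up an execution request whose inputs and outputs live in device memories. Input data is
// staged through ashmem and copied into each device memory; outputs are bound to freshly created
// device memories to be read back after the computation.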
static void createRequestWithDeviceMemories(const Compilation& compilation,
                                            const TestModel& testModel, Execution* execution,
                                            std::vector<Memory>* inputMemories,
                                            std::vector<Memory>* outputMemories) {
    ASSERT_NE(execution, nullptr);
    ASSERT_NE(inputMemories, nullptr);
    ASSERT_NE(outputMemories, nullptr);

    // Model inputs.
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        SCOPED_TRACE("Input index: " + std::to_string(i));
        const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
        // Omitted input.
        if (operand.data.size() == 0) {
            ASSERT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0));
            continue;
        }

        // Create device memory.
        ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i);
        ASSERT_NE(memory, nullptr);
        auto& wrapperMemory = inputMemories->emplace_back(memory);

        // Copy data from TestBuffer to device memory.
        auto ashmem = TestAshmem::createFrom(operand.data);
        ASSERT_NE(ashmem, nullptr);
        ASSERT_EQ(ANeuralNetworksMemory_copy(ashmem->get()->get(), memory),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0));
    }

    // Model outputs.
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        SCOPED_TRACE("Output index: " + std::to_string(i));
        ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i);
        ASSERT_NE(memory, nullptr);
        auto& wrapperMemory = outputMemories->emplace_back(memory);
        ASSERT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0));
    }
}

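// Reads the results of a device-memory execution back into TestBuffers, staging each output
// through ashmem since device memories may not be directly mappable.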
static void copyResultsFromDeviceMemories(const TestModel& testModel,
                                          const std::vector<Memory>& outputMemories,
                                          std::vector<TestBuffer>* outputs) {
    ASSERT_NE(outputs, nullptr);
    ASSERT_EQ(testModel.main.outputIndexes.size(), outputMemories.size());
    outputs->clear();

    // Copy out output results.
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        SCOPED_TRACE("Output index: " + std::to_string(i));
        const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
        const size_t bufferSize = operand.data.size();
        auto& output = outputs->emplace_back(bufferSize);

        auto ashmem = TestAshmem::createFrom(output);
        ASSERT_NE(ashmem, nullptr);
        ASSERT_EQ(ANeuralNetworksMemory_copy(outputMemories[i].get(), ashmem->get()->get()),
                  ANEURALNETWORKS_NO_ERROR);
        std::copy(ashmem->dataAs<uint8_t>(), ashmem->dataAs<uint8_t>() + bufferSize,
                  output.getMutable<uint8_t>());
    }
}

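// Runs a single execution against the given compilation and checks the results. When
// testReusableExecution is true, the same Execution object is computed a second time to exercise
// the reusable execution path.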
void GeneratedTests::executeInternal(const Compilation& compilation, const TestModel& testModel,
                                     bool testReusableExecution) {
    NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeInternal example");

    Execution execution(&compilation);
    if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
        execution.setReusable(testReusableExecution);
    }

    std::vector<TestBuffer> outputs;
    std::vector<Memory> inputMemories, outputMemories;

    if (mTestDeviceMemory) {
        createRequestWithDeviceMemories(compilation, testModel, &execution, &inputMemories,
                                        &outputMemories);
    } else {
        createRequest(testModel, &execution, &outputs);
    }

    const auto computeAndCheckResults = [this, &testModel, &execution, &outputs, &outputMemories] {
        Result result = execution.compute(mComputeMode);
        if (mTestDeviceMemory) {
            copyResultsFromDeviceMemories(testModel, outputMemories, &outputs);
        }

        if (result == Result::NO_ERROR && outputs.empty()) {
            return;
        }

        {
            NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeInternal example");
            if (mExpectFailure) {
                ASSERT_NE(result, Result::NO_ERROR);
                return;
            } else {
                ASSERT_EQ(result, Result::NO_ERROR);
            }

            // Check output dimensions.
            for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
                SCOPED_TRACE("Output index: " + std::to_string(i));
                const auto& output = testModel.main.operands[testModel.main.outputIndexes[i]];
                if (output.isIgnored) continue;
                std::vector<uint32_t> actualDimensions;
                ASSERT_EQ(Result::NO_ERROR,
                          execution.getOutputOperandDimensions(i, &actualDimensions));
                ASSERT_EQ(output.dimensions, actualDimensions);
            }

            checkResults(testModel, outputs);
        }
    };

    computeAndCheckResults();
    if (testReusableExecution) {
        computeAndCheckResults();
    }
}

void GeneratedTests::executeWithCompilation(const Compilation& compilation,
                                            const TestModel& testModel) {
    // Single-time and reusable executions have different code paths, so test both.
    executeInternal(compilation, testModel, /*testReusableExecution=*/false);
    if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
        if (mTestReusableExecution) {
            executeInternal(compilation, testModel, /*testReusableExecution=*/true);
        }
    }
}

static bool isPowerOfTwo(uint32_t x) {
    return x > 0 && ((x & (x - 1)) == 0);
}

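// Queries the preferred memory alignment and padding for every input and output of the
// compilation, and checks that each reported value is a power of two.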
static void validateCompilationMemoryPreferences(const Compilation& compilation,
                                                 const TestModel& testModel) {
    if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
        for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
            SCOPED_TRACE("Input index: " + std::to_string(i));
            uint32_t alignment = 0, padding = 0;
            ASSERT_EQ(compilation.getPreferredMemoryAlignmentForInput(i, &alignment),
                      Result::NO_ERROR);
            ASSERT_EQ(compilation.getPreferredMemoryPaddingForInput(i, &padding), Result::NO_ERROR);
            EXPECT_TRUE(isPowerOfTwo(alignment)) << "alignment: " << alignment;
            EXPECT_TRUE(isPowerOfTwo(padding)) << "padding: " << padding;
        }
        for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
            SCOPED_TRACE("Output index: " + std::to_string(i));
            uint32_t alignment = 0, padding = 0;
            ASSERT_EQ(compilation.getPreferredMemoryAlignmentForOutput(i, &alignment),
                      Result::NO_ERROR);
            ASSERT_EQ(compilation.getPreferredMemoryPaddingForOutput(i, &padding),
                      Result::NO_ERROR);
            EXPECT_TRUE(isPowerOfTwo(alignment)) << "alignment: " << alignment;
            EXPECT_TRUE(isPowerOfTwo(padding)) << "padding: " << padding;
        }
    }
}

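// Compiles the model and, if compilation succeeds, validates the compilation's memory preferences
// and runs the full execution flow once.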
void GeneratedTests::executeOnce(const Model& model, const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
    std::optional<Compilation> compilation = compileModel(model);
    // Early return if compilation fails. The compilation result code is checked in compileModel.
    if (!compilation) return;
    validateCompilationMemoryPreferences(compilation.value(), testModel);
    executeWithCompilation(compilation.value(), testModel);
}

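// Runs executeOnce concurrently on ten threads, each thread creating its own compilation from the
// shared model.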
void GeneratedTests::executeMultithreadedOwnCompilation(const Model& model,
                                                        const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedOwnCompilation");
    SCOPED_TRACE("MultithreadedOwnCompilation");
    std::vector<std::thread> threads;
    for (int i = 0; i < 10; i++) {
        threads.push_back(std::thread([&]() { executeOnce(model, testModel); }));
    }
    std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}

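// Compiles the model once, then runs executions against the shared compilation concurrently on
// ten threads.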
void GeneratedTests::executeMultithreadedSharedCompilation(const Model& model,
                                                           const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation");
    SCOPED_TRACE("MultithreadedSharedCompilation");
    std::optional<Compilation> compilation = compileModel(model);
    // Early return if compilation fails. The compilation result code is checked in compileModel.
    if (!compilation) return;
    std::vector<std::thread> threads;
    for (int i = 0; i < 10; i++) {
        threads.push_back(
                std::thread([&]() { executeWithCompilation(compilation.value(), testModel); }));
    }
    std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}

// Test driver for those generated from ml/nn/runtime/test/spec
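// Each model is executed twice: once without and once with compilation caching (the latter is
// skipped for models that are expected to fail).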
void GeneratedTests::execute(const TestModel& testModel) {
    NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute");
    GeneratedModel model;
    createModel(testModel, mTestDynamicOutputShape, &model);
    if (testModel.expectFailure && !model.isValid()) {
        return;
    }
    ASSERT_EQ(model.finish(), Result::NO_ERROR);
    ASSERT_TRUE(model.isValid());
    auto executeInternal = [&testModel, &model, this]() {
        SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching));
#ifndef NNTEST_MULTITHREADED
        executeOnce(model, testModel);
#else  // defined(NNTEST_MULTITHREADED)
        executeMultithreadedOwnCompilation(model, testModel);
        executeMultithreadedSharedCompilation(model, testModel);
#endif  // !defined(NNTEST_MULTITHREADED)
    };
    mTestCompilationCaching = false;
    executeInternal();
    if (!mExpectFailure) {
        mTestCompilationCaching = true;
        executeInternal();
    }
}

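// Returns true if the current test should be skipped on this device, i.e. when the test exercises
// behavior that the device's older vendor partition cannot be expected to support.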
bool GeneratedTests::shouldSkipTest() {
    // A map of {min VNDK version -> tests that should be skipped with earlier VNDK versions}.
    // The listed tests were added in a later release but exercise old APIs. They should be
    // skipped if the device has a mixed build of system and vendor partitions.
    static const std::map<int, std::set<std::string>> kMapOfMinVndkVersionToTests = {
            {
                    __ANDROID_API_R__,
                    {
                            "add_broadcast_quant8_all_inputs_as_internal",
                    },
            },
    };
    for (const auto& [minVersion, names] : kMapOfMinVndkVersionToTests) {
        if (mVndkVersion < minVersion && names.count(kTestName) > 0) {
            return true;
        }
    }
    return false;
}

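// Reads the device's VNDK version, skips the test if necessary, and creates a fresh temporary
// cache directory and an all-zero cache token for compilation caching tests.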
void GeneratedTests::SetUp() {
    GeneratedTestBase::SetUp();

    mVndkVersion = ::android::base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__);
    if (shouldSkipTest()) {
        GTEST_SKIP();
        return;
    }

    char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
    char* cacheDir = mkdtemp(cacheDirTemp);
    ASSERT_NE(cacheDir, nullptr);
    mCacheDir = cacheDir;
    mToken = std::vector<uint8_t>(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
}

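// Recursively removes the temporary cache directory, unless the test failed (in which case the
// directory is left in place).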
void GeneratedTests::TearDown() {
    if (!::testing::Test::HasFailure()) {
        // TODO: Switch to std::filesystem::remove_all once libc++fs is made available in CTS.
        // Remove the cache directory specified by path recursively.
        auto callback = [](const char* child, const struct stat*, int, struct FTW*) {
            return remove(child);
        };
        nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
    }
    GeneratedTestBase::TearDown();
}

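// In CTS builds (NNTEST_COMPUTE_MODE), every generated test runs once per compute mode;
// otherwise, a single variant runs with the default compute mode.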
#ifdef NNTEST_COMPUTE_MODE
TEST_P(GeneratedTests, Sync) {
    mComputeMode = Execution::ComputeMode::SYNC;
    execute(testModel);
}

TEST_P(GeneratedTests, Async) {
    mComputeMode = Execution::ComputeMode::ASYNC;
    execute(testModel);
}

TEST_P(GeneratedTests, Burst) {
    mComputeMode = Execution::ComputeMode::BURST;
    execute(testModel);
}
#else
TEST_P(GeneratedTests, Test) {
    execute(testModel);
}
#endif

TEST_P(DynamicOutputShapeTest, Test) {
    execute(testModel);
}

TEST_P(GeneratedValidationTests, Test) {
    execute(testModel);
}

TEST_P(QuantizationCouplingTest, Test) {
    execute(convertQuant8AsymmOperandsToSigned(testModel));
}

TEST_P(DeviceMemoryTest, Test) {
    execute(testModel);
}

TEST_P(FencedComputeTest, Test) {
    mComputeMode = Execution::ComputeMode::FENCED;
    execute(testModel);
}

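// Each suite is instantiated with a filter that selects which generated models it runs: for
// example, validation tests take only models that are expected to fail, while the device memory
// and fenced compute tests require every output to have reference data of known size.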
INSTANTIATE_GENERATED_TEST(GeneratedTests,
                           [](const TestModel& testModel) { return !testModel.expectFailure; });

INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && !testModel.hasScalarOutputs();
});

INSTANTIATE_GENERATED_TEST(GeneratedValidationTests, [](const TestModel& testModel) {
    return testModel.expectFailure && !testModel.isInfiniteLoopTimeoutTest();
});

INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
    return !testModel.expectFailure && testModel.main.operations.size() == 1 &&
           testModel.referenced.size() == 0 && testModel.hasQuant8CoupledOperands();
});

INSTANTIATE_GENERATED_TEST(DeviceMemoryTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
    return !testModel.expectFailure &&
           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                       [&testModel](uint32_t index) {
                           return testModel.main.operands[index].data.size() > 0;
                       });
});

}  // namespace android::nn::generated_tests