1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "GeneratedTestUtils.h"
18 
19 #include <android-base/logging.h>
20 #include <gtest/gtest.h>
21 
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
27 
28 #include "TestHarness.h"
29 
30 #ifdef NNTEST_SLTS
31 #include <android/hardware_buffer.h>
32 #include "SupportLibraryWrapper.h"
33 #else
34 #include "TestNeuralNetworksWrapper.h"
35 #endif
36 
37 namespace android::nn::generated_tests {
38 using namespace test_wrapper;
39 using namespace test_helper;
40 
getOperandType(const TestOperand & op,bool testDynamicOutputShape)41 static OperandType getOperandType(const TestOperand& op, bool testDynamicOutputShape) {
42     auto dims = op.dimensions;
43     if (testDynamicOutputShape && op.lifetime == TestOperandLifeTime::SUBGRAPH_OUTPUT) {
44         dims.assign(dims.size(), 0);
45     }
46     if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
47         return OperandType(
48                 static_cast<Type>(op.type), dims,
49                 SymmPerChannelQuantParams(op.channelQuant.scales, op.channelQuant.channelDim));
50     } else {
51         return OperandType(static_cast<Type>(op.type), dims, op.scale, op.zeroPoint);
52     }
53 }
54 
// A Memory object that owns AHardwareBuffer
//
// The buffer is allocated in BLOB format, locked for CPU read/write for the
// whole lifetime of the object, and unlocked + released in the destructor.
// getPointer() exposes the CPU-visible mapping obtained from the lock.
class MemoryAHWB : public Memory {
   public:
#ifdef NNTEST_SLTS
    // SLTS builds route NNAPI calls through the support library handle |nnapi|.
    static std::unique_ptr<MemoryAHWB> create(const NnApiSupportLibrary* nnapi, uint32_t size) {
#else
    static std::unique_ptr<MemoryAHWB> create(uint32_t size) {
#endif
        const uint64_t usage =
                AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
        AHardwareBuffer_Desc desc = {
                .width = size,  // for BLOB format, width is the size in bytes
                .height = 1,
                .layers = 1,
                .format = AHARDWAREBUFFER_FORMAT_BLOB,
                .usage = usage,
        };
        AHardwareBuffer* ahwb = nullptr;
        // EXPECT (not ASSERT) so this remains usable outside a void test body;
        // failures are recorded but construction proceeds.
        EXPECT_EQ(AHardwareBuffer_allocate(&desc, &ahwb), 0);
        EXPECT_NE(ahwb, nullptr);

        // Lock once and keep the mapping for the object's lifetime; -1 means
        // there is no fence to wait on.
        void* buffer = nullptr;
        EXPECT_EQ(AHardwareBuffer_lock(ahwb, usage, -1, nullptr, &buffer), 0);
        EXPECT_NE(buffer, nullptr);

#ifdef NNTEST_SLTS
        return std::unique_ptr<MemoryAHWB>(new MemoryAHWB(nnapi, ahwb, buffer));
#else
        return std::unique_ptr<MemoryAHWB>(new MemoryAHWB(ahwb, buffer));
#endif
    }

    ~MemoryAHWB() override {
        EXPECT_EQ(AHardwareBuffer_unlock(mAhwb, nullptr), 0);
        AHardwareBuffer_release(mAhwb);
    }

    // Returns the CPU-visible mapping established in create(); valid for the
    // lifetime of this object.
    void* getPointer() const { return mBuffer; }

   private:
#ifdef NNTEST_SLTS
    MemoryAHWB(const NnApiSupportLibrary* nnapi, AHardwareBuffer* ahwb, void* buffer)
        : Memory(nnapi, ahwb, false, {}), mAhwb(ahwb), mBuffer(buffer) {}
#else
    MemoryAHWB(AHardwareBuffer* ahwb, void* buffer) : Memory(ahwb), mAhwb(ahwb), mBuffer(buffer) {}
#endif

    AHardwareBuffer* mAhwb;  // owned; released in the destructor
    void* mBuffer;           // CPU mapping of mAhwb; valid while locked
};
105 
106 #ifdef NNTEST_SLTS
createConstantReferenceMemory(const NnApiSupportLibrary * nnapi,const TestModel & testModel)107 static std::unique_ptr<MemoryAHWB> createConstantReferenceMemory(const NnApiSupportLibrary* nnapi,
108                                                                  const TestModel& testModel) {
109 #else
110 static std::unique_ptr<MemoryAHWB> createConstantReferenceMemory(const TestModel& testModel) {
111 #endif
112     uint32_t size = 0;
113 
114     auto processSubgraph = [&size](const TestSubgraph& subgraph) {
115         for (const TestOperand& operand : subgraph.operands) {
116             if (operand.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
117                 size += operand.data.alignedSize();
118             }
119         }
120     };
121 
122     processSubgraph(testModel.main);
123     for (const TestSubgraph& subgraph : testModel.referenced) {
124         processSubgraph(subgraph);
125     }
126 #ifdef NNTEST_SLTS
127     return size == 0 ? nullptr : MemoryAHWB::create(nnapi, size);
128 #else
129     return size == 0 ? nullptr : MemoryAHWB::create(size);
130 #endif
131 }
132 
// Recursively populates |model| with the operands and operations of |subgraph|.
// CONSTANT_REFERENCE operand data is copied into the shared |memory| pool at
// |*memoryOffset|, which is advanced by the operand's aligned size so
// successive operands pack without overlap. SUBGRAPH operands trigger
// recursive construction of the referenced model in |refModels| (an array
// indexed in parallel with |refSubgraphs|) before being attached to |model|.
static void createModelFromSubgraph(const TestSubgraph& subgraph, bool testDynamicOutputShape,
                                    const std::vector<TestSubgraph>& refSubgraphs,
                                    const std::unique_ptr<MemoryAHWB>& memory,
                                    uint32_t* memoryOffset, Model* model, Model* refModels) {
    // Operands.
    for (const auto& operand : subgraph.operands) {
        auto type = getOperandType(operand, testDynamicOutputShape);
        auto index = model->addOperand(&type);

        switch (operand.lifetime) {
            case TestOperandLifeTime::CONSTANT_COPY: {
                model->setOperandValue(index, operand.data.get<void>(), operand.data.size());
            } break;
            case TestOperandLifeTime::CONSTANT_REFERENCE: {
                const uint32_t length = operand.data.size();
                // Copy the payload into the shared pool, then reference it by
                // (memory, offset, length).
                std::memcpy(static_cast<uint8_t*>(memory->getPointer()) + *memoryOffset,
                            operand.data.get<void>(), length);
                model->setOperandValueFromMemory(index, memory.get(), *memoryOffset, length);
                *memoryOffset += operand.data.alignedSize();
            } break;
            case TestOperandLifeTime::NO_VALUE: {
                // Omitted operand: explicit null value with zero length.
                model->setOperandValue(index, nullptr, 0);
            } break;
            case TestOperandLifeTime::SUBGRAPH: {
                // The operand's data payload holds the index of the referenced
                // subgraph within |refSubgraphs|.
                uint32_t refIndex = *operand.data.get<uint32_t>();
                CHECK_LT(refIndex, refSubgraphs.size());
                const TestSubgraph& refSubgraph = refSubgraphs[refIndex];
                Model* refModel = &refModels[refIndex];

                // Build and finish each referenced model at most once; later
                // references to the same subgraph reuse the finished model.
                if (!refModel->isFinished()) {
                    createModelFromSubgraph(refSubgraph, testDynamicOutputShape, refSubgraphs,
                                            memory, memoryOffset, refModel, refModels);
                    ASSERT_EQ(refModel->finish(), Result::NO_ERROR);
                    ASSERT_TRUE(refModel->isValid());
                }
                model->setOperandValueFromModel(index, refModel);
            } break;
            case TestOperandLifeTime::SUBGRAPH_INPUT:
            case TestOperandLifeTime::SUBGRAPH_OUTPUT:
            case TestOperandLifeTime::TEMPORARY_VARIABLE: {
                // Nothing to do here.
            } break;
        }
    }

    // Operations.
    for (const auto& operation : subgraph.operations) {
        model->addOperation(static_cast<int>(operation.type), operation.inputs, operation.outputs);
    }

    // Inputs and outputs.
    model->identifyInputsAndOutputs(subgraph.inputIndexes, subgraph.outputIndexes);
}
186 
187 #ifdef NNTEST_SLTS
188 void createModel(const NnApiSupportLibrary* nnapi, const TestModel& testModel,
189                  bool testDynamicOutputShape, GeneratedModel* model) {
190 #else
191 void createModel(const TestModel& testModel, bool testDynamicOutputShape, GeneratedModel* model) {
192 #endif
193     ASSERT_NE(nullptr, model);
194 
195 #ifdef NNTEST_SLTS
196     std::unique_ptr<MemoryAHWB> memory = createConstantReferenceMemory(nnapi, testModel);
197 #else
198     std::unique_ptr<MemoryAHWB> memory = createConstantReferenceMemory(testModel);
199 #endif
200     uint32_t memoryOffset = 0;
201 #ifdef NNTEST_SLTS
202     std::vector<Model> refModels;
203     refModels.reserve(testModel.referenced.size());
204     for (int i = 0; i < testModel.referenced.size(); ++i) {
205         refModels.push_back(Model(nnapi));
206     }
207 #else
208     std::vector<Model> refModels(testModel.referenced.size());
209 #endif
210     createModelFromSubgraph(testModel.main, testDynamicOutputShape, testModel.referenced, memory,
211                             &memoryOffset, model, refModels.data());
212     model->setRefModels(std::move(refModels));
213     model->setConstantReferenceMemory(std::move(memory));
214 
215     // Relaxed computation.
216     model->relaxComputationFloat32toFloat16(testModel.isRelaxed);
217 
218     if (!testModel.expectFailure) {
219         ASSERT_TRUE(model->isValid());
220     }
221 }
222 
223 void createRequest(const TestModel& testModel, Execution* execution,
224                    std::vector<TestBuffer>* outputs) {
225     ASSERT_NE(nullptr, execution);
226     ASSERT_NE(nullptr, outputs);
227 
228     // Model inputs.
229     for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
230         const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
231         ASSERT_EQ(Result::NO_ERROR,
232                   execution->setInput(i, operand.data.get<void>(), operand.data.size()));
233     }
234 
235     // Model outputs.
236     for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
237         const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
238 
239         // In the case of zero-sized output, we should at least provide a one-byte buffer.
240         // This is because zero-sized tensors are only supported internally to the runtime, or
241         // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
242         // tensor as model output. Otherwise, we will have two semantic conflicts:
243         // - "Zero dimension" conflicts with "unspecified dimension".
244         // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
245         const size_t bufferSize = std::max<size_t>(operand.data.size(), 1);
246 
247         outputs->emplace_back(bufferSize);
248         ASSERT_EQ(Result::NO_ERROR,
249                   execution->setOutput(i, outputs->back().getMutable<void>(), bufferSize));
250     }
251 }
252 
253 }  // namespace android::nn::generated_tests
254