/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/macros.h>
#include <android/sharedmem.h>
#include <gtest/gtest.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cstring>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "TestNeuralNetworksWrapper.h"

using namespace android::nn::test_wrapper;

namespace {

// We try the following model:
//
//     op2 = ADD(op0, op1)
//     op4 = TRANSPOSE(op2, op3)
//
// where op0 is a required model input, should be of dimension (A, B),
//       op1 is a required constant, should be of dimension (A, 1),
//       op2 is an internal operand, should be of dimension (A, B),
//       op3 is an omitted optional constant / model input, should be of dimension (2),
//       op4 is a model output, should be of dimension (B, A).
//
// For each operand, we test combinations of the dimension specification level at model
// construction time and at execution time (if any). All other relevant combinations of the
// basic scenarios are then iterated over in TestAll. Note that we don't want to just use
// googletest's parameterized tests (TEST_P), as the 16k combinations generated too many
// lines of output for the test infrastructure to handle correctly.

// Which operand to test
enum class UnspecifiedOperand {
    INPUT_MANDATORY,
    CONST_MANDATORY,
    TEMPORARY_VARIABLE,
    INPUT_OPTIONAL,
    CONST_OPTIONAL,
    OUTPUT
};
// How well the dimensional information is specified
enum class SpecificationLevel {
    FULLY_SPECIFIED,   // all dimensions are clearly specified without any ambiguity
    UNSPECIFIED_DIM,   // certain dimension is set to 0 as unknown, but rank is well-specified
    UNSPECIFIED_RANK,  // rank is set to 0 as unknown, passing an empty vector for dims
    UNSPECIFIED_TYPE   // only during execution time, passing nullptr for operand type
};
using UnspecifiedDimensionsTestParam = std::tuple<UnspecifiedOperand,
                                                  SpecificationLevel,   // model construction time
                                                  SpecificationLevel>;  // execution time
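// For example, the parameter tuple {INPUT_MANDATORY, UNSPECIFIED_DIM, FULLY_SPECIFIED} exercises
// op0 built with dimensions (2, 0) at model construction time and with fully specified dimensions
// at execution time; see kDimAGood and kValueB below for the concrete values.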

// Indexing
constexpr uint32_t kIndex0_Model = 0;      // op0, model
constexpr uint32_t kIndex1_Model = 1;      // op1, model
constexpr uint32_t kIndex2_Model = 2;      // op2, model
constexpr uint32_t kIndex3_Model = 3;      // op3, model
constexpr uint32_t kIndex4_Model = 4;      // op4, model
constexpr uint32_t kIndex0_Execution = 5;  // op0, execution
constexpr uint32_t kIndex3_Execution = 6;  // op3, execution
constexpr uint32_t kIndex4_Execution = 7;  // op4, execution
constexpr uint32_t kIndexCount = 8;        // count

constexpr int32_t kValueA = 0;
constexpr int32_t kValueB = 2;
constexpr uint32_t kDimAGood = 2;
constexpr uint32_t kDimABad = 3;

class UnspecifiedDimensionsTest : public ::testing::TestWithParam<UnspecifiedDimensionsTestParam> {
    enum class OptionalType { CONST, INPUT };       // whether the omitted operand op3 is an input or a const
    enum class BufferSize { LESS, EQUAL, MORE };    // used for input and output buffer sizes
    enum class OperandLocation { BUFFER, MEMORY };  // where the operand resides
    enum class InOutType { INPUT, OUTPUT };         // parameter for setInOut()
    // Whether input/output padding is implicitly disabled, enabled, or explicitly disabled
    enum class PaddingEnabled { DEFAULT, ENABLED, DISABLED };

    class SharedMemoryForTest {
      public:
        SharedMemoryForTest() : memory(nullptr), fd(-1), buffer(nullptr) {}
        ~SharedMemoryForTest() {
            if (buffer != nullptr) {
                munmap(buffer, kLength);
            }
            if (fd > -1) {
                close(fd);
            }
        }
        void initialize(size_t size, const void* data) {
            fd = ASharedMemory_create(nullptr, kLength);
            ASSERT_GT(fd, -1);
            buffer = (uint8_t*)mmap(nullptr, kLength, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            ASSERT_NE(buffer, nullptr);
            memcpy(buffer, data, size);
            memory = std::make_shared<Memory>(kLength, PROT_READ | PROT_WRITE, fd, 0);
            ASSERT_TRUE(memory->isValid());
        }
        const Memory* getMemory() const { return memory.get(); }
        const uint8_t* getBuffer() const { return buffer; }

      private:
        DISALLOW_COPY_AND_ASSIGN(SharedMemoryForTest);
        std::shared_ptr<Memory> memory;
        int fd;
        uint8_t* buffer;
        // Always allocate an ashmem of 64 bytes. This is large enough for all use cases.
        static constexpr size_t kLength = 64;
    };

    std::string toString(SpecificationLevel level) {
        switch (level) {
            case SpecificationLevel::FULLY_SPECIFIED:
                return "FULLY_SPECIFIED";
            case SpecificationLevel::UNSPECIFIED_DIM:
                return "UNSPECIFIED_DIM";
            case SpecificationLevel::UNSPECIFIED_RANK:
                return "UNSPECIFIED_RANK";
            case SpecificationLevel::UNSPECIFIED_TYPE:
                return "UNSPECIFIED_TYPE";
            default:
                return "UNKNOWN";
        }
    }

    std::string toString(BufferSize b) {
        switch (b) {
            case BufferSize::LESS:
                return "LESS";
            case BufferSize::EQUAL:
                return "EQUAL";
            case BufferSize::MORE:
                return "MORE";
            default:
                return "UNKNOWN";
        }
    }

    std::string toString(OperandLocation loc) {
        switch (loc) {
            case OperandLocation::BUFFER:
                return "BUFFER";
            case OperandLocation::MEMORY:
                return "MEMORY";
            default:
                return "UNKNOWN";
        }
    }

    std::string toString(PaddingEnabled enabled) {
        switch (enabled) {
            case PaddingEnabled::DEFAULT:
                return "DEFAULT";
            case PaddingEnabled::ENABLED:
                return "ENABLED";
            case PaddingEnabled::DISABLED:
                return "DISABLED";
            default:
                return "UNKNOWN";
        }
    }

  protected:
    virtual void SetUp() {
        uint32_t modelIndex, executionIndex;
        switch (kUnspecifiedOperand) {
            case UnspecifiedOperand::INPUT_MANDATORY:
                modelIndex = kIndex0_Model;
                executionIndex = kIndex0_Execution;
                mBadIndexChoices = {kIndexCount, modelIndex, executionIndex};
                mOperandLocationChoices = {OperandLocation::BUFFER, OperandLocation::MEMORY};
                mBufferSizeChoices = {BufferSize::LESS, BufferSize::EQUAL, BufferSize::MORE};
                mEnablePaddingChoices = {PaddingEnabled::DEFAULT, PaddingEnabled::ENABLED,
                                         PaddingEnabled::DISABLED};
                break;
            case UnspecifiedOperand::CONST_MANDATORY:
                modelIndex = kIndex1_Model;
                executionIndex = kIndexCount;
                mBadIndexChoices = {kIndexCount, modelIndex};
                mOperandLocationChoices = {OperandLocation::BUFFER, OperandLocation::MEMORY};
                break;
            case UnspecifiedOperand::TEMPORARY_VARIABLE:
                modelIndex = kIndex2_Model;
                executionIndex = kIndexCount;
                mBadIndexChoices = {kIndexCount, modelIndex};
                mOperandLocationChoices = {OperandLocation::BUFFER};
                break;
            case UnspecifiedOperand::INPUT_OPTIONAL:
                modelIndex = kIndex3_Model;
                executionIndex = kIndex3_Execution;
                mBadIndexChoices = {kIndexCount};
                mOptionalType = OptionalType::INPUT;
                mOperandLocationChoices = {OperandLocation::BUFFER};
                break;
            case UnspecifiedOperand::CONST_OPTIONAL:
                modelIndex = kIndex3_Model;
                executionIndex = kIndexCount;
                mBadIndexChoices = {kIndexCount};
                mOperandLocationChoices = {OperandLocation::BUFFER};
                break;
            case UnspecifiedOperand::OUTPUT:
                modelIndex = kIndex4_Model;
                executionIndex = kIndex4_Execution;
                mBadIndexChoices = {kIndexCount, modelIndex, executionIndex};
                mOperandLocationChoices = {OperandLocation::BUFFER, OperandLocation::MEMORY};
                mBufferSizeChoices = {BufferSize::LESS, BufferSize::EQUAL, BufferSize::MORE};
                mEnablePaddingChoices = {PaddingEnabled::DEFAULT, PaddingEnabled::ENABLED,
                                         PaddingEnabled::DISABLED};
                break;
            default:
                break;
        }
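        // Baseline specification levels for the eight (operand, time) slots indexed by the
        // kIndex* constants: op1 and op3 and all execution-time slots default to FULLY_SPECIFIED,
        // while op0, op2, and op4 default to UNSPECIFIED_DIM at model construction time. The
        // slot(s) of the operand under test are then overwritten with the levels from GetParam().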
        std::vector<SpecificationLevel> levels{
                SpecificationLevel::UNSPECIFIED_DIM, SpecificationLevel::FULLY_SPECIFIED,
                SpecificationLevel::UNSPECIFIED_DIM, SpecificationLevel::FULLY_SPECIFIED,
                SpecificationLevel::UNSPECIFIED_DIM, SpecificationLevel::FULLY_SPECIFIED,
                SpecificationLevel::FULLY_SPECIFIED, SpecificationLevel::FULLY_SPECIFIED};
        levels[modelIndex] = kSpecificationLevelModel;
        if (executionIndex < kIndexCount) {
            levels[executionIndex] = kSpecificationLevelExecution;
        }
        mSpecificationLevels = std::move(levels);
    }

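    // Builds the wrapper OperandType for the operand at |index|. A 0 in |dim| is a placeholder
    // for dimension A and is resolved to kDimAGood, or to kDimABad when this operand is the
    // chosen bad index. Nonzero entries are kept when the specification level is FULLY_SPECIFIED
    // and zeroed out for UNSPECIFIED_DIM; for UNSPECIFIED_RANK the dimension vector stays empty.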
    OperandType getType(uint32_t index, const std::vector<uint32_t>& dim) {
        const SpecificationLevel l = mSpecificationLevels[index];
        std::vector<uint32_t> setDim;
        if (l != SpecificationLevel::UNSPECIFIED_RANK) {
            for (auto d : dim) {
                if (d == 0) {
                    setDim.push_back(mBadIndex != index ? kDimAGood : kDimABad);
                } else {
                    setDim.push_back(l == SpecificationLevel::FULLY_SPECIFIED ? d : 0);
                }
            }
        }
        float scale = mOperandTypes[index] == Type::TENSOR_QUANT8_ASYMM ? 1.0f : 0.0f;
        return OperandType(mOperandTypes[index], setDim, scale, 0);
    }

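    // Returns the number of elements implied by |dim|, with the 0 placeholder resolved the same
    // way as in getType. The count is halved or doubled for BufferSize::LESS / BufferSize::MORE
    // to simulate an undersized or oversized buffer.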
    uint32_t getSize(uint32_t index, const std::vector<uint32_t>& dim,
                     BufferSize s = BufferSize::EQUAL) {
        uint32_t n = 1;
        for (auto d : dim) {
            n *= (d == 0 ? (mBadIndex != index ? kDimAGood : kDimABad) : d);
        }
        if (s == BufferSize::LESS) {
            n /= 2;
        } else if (s == BufferSize::MORE) {
            n *= 2;
        }
        return n;
    }

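    // Sets one execution input or output (selected by |inOutType|) for operand |opIndex|, either
    // from a raw buffer or from shared memory depending on mOperandLocation. A nullptr operand
    // type is passed when the execution-time specification level is UNSPECIFIED_TYPE.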
    template <typename T>
    Result setInOut(Execution* execution, uint32_t index, uint32_t opIndex,
                    const std::vector<uint32_t>& dim, void* buffer,
                    const SharedMemoryForTest* memory, InOutType inOutType,
                    BufferSize bufferSize = BufferSize::EQUAL) {
        const auto kLevel = mSpecificationLevels[index];
        size_t size = (buffer == nullptr) ? 0 : getSize(index, dim, bufferSize) * sizeof(T);
        auto type = getType(index, dim);
        ANeuralNetworksOperandType* t =
                (kLevel == SpecificationLevel::UNSPECIFIED_TYPE) ? nullptr : &type.operandType;
        if (mOperandLocation == OperandLocation::MEMORY && memory != nullptr) {
            if (inOutType == InOutType::INPUT) {
                return execution->setInputFromMemory(opIndex, memory->getMemory(), 0, size, t);
            } else {
                return execution->setOutputFromMemory(opIndex, memory->getMemory(), 0, size, t);
            }
        } else {
            if (inOutType == InOutType::INPUT) {
                return execution->setInput(opIndex, buffer, size, t);
            } else {
                return execution->setOutput(opIndex, buffer, size, t);
            }
        }
    }

    template <typename T, Type TensorType>
    void TestOne() {
        // Phase 1: Build Model
        Model model;
        auto type0 = getType(kIndex0_Model, {kValueA, kValueB});
        auto type1 = getType(kIndex1_Model, {kValueA, 1});
        auto type2 = getType(kIndex2_Model, {kValueA, kValueB});
        auto type3 = getType(kIndex3_Model, {2});
        auto type4 = getType(kIndex4_Model, {kValueB, kValueA});
        OperandType typeActivation(Type::INT32, {});  // activation

        auto op0 = model.addOperand(&type0);
        auto op1 = model.addOperand(&type1);
        auto op2 = model.addOperand(&type2);
        auto op3 = model.addOperand(&type3);
        auto op4 = model.addOperand(&type4);
        auto act = model.addOperand(&typeActivation);

        T bufferOp1[2] = {1, 2};
        SharedMemoryForTest memoryOp1;
        memoryOp1.initialize(sizeof(bufferOp1), bufferOp1);
        if (mOperandLocation == OperandLocation::BUFFER) {
            model.setOperandValue(op1, bufferOp1, sizeof(bufferOp1));
        } else {
            model.setOperandValueFromMemory(op1, memoryOp1.getMemory(), 0, sizeof(bufferOp1));
        }
        int32_t kActivation = 0;
        model.setOperandValue(act, &kActivation, sizeof(int32_t));
        if (mOptionalType == OptionalType::CONST) {
            model.setOperandValue(op3, nullptr, 0);
        }

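        // op3 (the permutation input of TRANSPOSE) stays omitted: either set above as a null
        // constant, or passed as an omitted input at execution time. The omitted permutation
        // makes TRANSPOSE act as a plain transpose, producing the (B, A) output described at the
        // top of this file.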
        model.addOperation(ANEURALNETWORKS_ADD, {op0, op1, act}, {op2});
        model.addOperation(ANEURALNETWORKS_TRANSPOSE, {op2, op3}, {op4});
        if (mOptionalType == OptionalType::CONST) {
            model.identifyInputsAndOutputs({op0}, {op4});
        } else {
            model.identifyInputsAndOutputs({op0, op3}, {op4});
        }

        bool expected = expectModelIsValid();
        ASSERT_EQ(model.isValid(), expected);
        Result result = model.finish();
        if (expected) {
            ASSERT_EQ(result, Result::NO_ERROR);
        } else {
            // There is no contract (yet) for specific errors in NeuralNetworks.h,
            // so we just assert on not being successful.
            ASSERT_NE(result, Result::NO_ERROR);
            return;
        }

        // Phase 2: Compile Model, should always pass
        Compilation compilation(&model);
        ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

        std::vector<uint32_t> valueBChoices = {1, 2};
        for (const auto valueB : valueBChoices) {
            SCOPED_TRACE("ValueB: " + std::to_string(valueB));
            if (valueB != kValueB &&
                (mSpecificationLevels[kIndex0_Model] == SpecificationLevel::FULLY_SPECIFIED ||
                 mSpecificationLevels[kIndex2_Model] == SpecificationLevel::FULLY_SPECIFIED ||
                 mSpecificationLevels[kIndex4_Model] == SpecificationLevel::FULLY_SPECIFIED)) {
                continue;
            }

            // Phase 3: Set Execution Input/Output
            Execution execution(&compilation);

            // Enable padding
            if (mEnablePadding == PaddingEnabled::ENABLED) {
                ASSERT_EQ(execution.enableInputAndOutputPadding(true), Result::NO_ERROR);
            } else if (mEnablePadding == PaddingEnabled::DISABLED) {
                ASSERT_EQ(execution.enableInputAndOutputPadding(false), Result::NO_ERROR);
            }

            // Set input0
            Result result;
            T bufferOp0[6] = {1, 2, 3, 4, 5, 6};
            SharedMemoryForTest memoryOp0;
            memoryOp0.initialize(sizeof(bufferOp0), bufferOp0);
            result = setInOut<T>(&execution, kIndex0_Execution, 0, {kValueA, valueB}, bufferOp0,
                                 &memoryOp0, InOutType::INPUT, mBufferSize);
            ASSERT_EQ(result, expectSetInput0());
            if (result != Result::NO_ERROR) continue;

            // Set input1, omitted
            if (mOptionalType == OptionalType::INPUT) {
                result = setInOut<T>(&execution, kIndex3_Execution, 1, {2}, nullptr, nullptr,
                                     InOutType::INPUT);
                ASSERT_EQ(result, expectSetInput1());
                if (result != Result::NO_ERROR) continue;
            }

            // Set output0
            T bufferOp4[16];
            SharedMemoryForTest memoryOp4;
            memoryOp4.initialize(sizeof(bufferOp4), bufferOp4);
            result = setInOut<T>(&execution, kIndex4_Execution, 0, {valueB, kValueA}, bufferOp4,
                                 &memoryOp4, InOutType::OUTPUT, mBufferSize);
            ASSERT_EQ(result, expectSetOutput0());
            if (result != Result::NO_ERROR) continue;

            // Phase 4: Compute and Compare Results
            result = execution.compute();
            ASSERT_EQ(result, expectCompute());
            if (result == Result::OP_FAILED) continue;

            std::vector<uint32_t> outputShape;
            ASSERT_EQ(execution.getOutputOperandDimensions(0, &outputShape), result);
            std::vector<uint32_t> expectedOutputShape = {valueB, kDimAGood};
            ASSERT_EQ(outputShape, expectedOutputShape);
            if (result == Result::OUTPUT_INSUFFICIENT_SIZE) continue;

            const T* outputBuffer = mOperandLocation == OperandLocation::MEMORY
                                            ? reinterpret_cast<const T*>(memoryOp4.getBuffer())
                                            : bufferOp4;
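            // Expected results: bufferOp0 holds op0 in row-major order, op1 = {1, 2} is
            // broadcast-added to the two rows, and the sum is transposed. For valueB == 1 this
            // yields {2, 4}; for valueB == 2 it yields {2, 5, 3, 6}.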
            T expected_1x2[2] = {2, 4};
            T expected_2x2[4] = {2, 5, 3, 6};
            for (uint32_t i = 0; i < kDimAGood * valueB; i++) {
                ASSERT_EQ(outputBuffer[i], valueB == 1 ? expected_1x2[i] : expected_2x2[i]);
            }
        }
    }

    // Expect an invalid model for the following cases
    // - op1 is not fully specified (const operand must be fully specified)
    // - op1 has a bad dimension value (const operand size is checked against the buffer size)
    bool expectModelIsValid() {
        const auto kLevel1_Model = mSpecificationLevels[kIndex1_Model];
        if (kLevel1_Model != SpecificationLevel::FULLY_SPECIFIED || mBadIndex == kIndex1_Model) {
            return false;
        }
        return true;
    }

    // Expect BAD_DATA on input0 for the following cases
    // - the provided type is not fully specified
    // - the provided type does not agree with the type set at model construction time
    // - no type is provided and the type is not fully specified at model construction time
    // - the buffer size (length) is less than needed
    // - the buffer size (length) is more than needed and padding is not enabled
    Result expectSetInput0() {
        const auto kLevel0_Model = mSpecificationLevels[kIndex0_Model];
        const auto kLevel0_Execution = mSpecificationLevels[kIndex0_Execution];
        switch (kLevel0_Execution) {
            case SpecificationLevel::UNSPECIFIED_DIM:
            case SpecificationLevel::UNSPECIFIED_RANK:
                return Result::BAD_DATA;
            case SpecificationLevel::FULLY_SPECIFIED:
                if ((mBadIndex == kIndex0_Execution || mBadIndex == kIndex0_Model) &&
                    kLevel0_Model != SpecificationLevel::UNSPECIFIED_RANK) {
                    return Result::BAD_DATA;
                }
                if (mBufferSize == BufferSize::LESS) {
                    return Result::BAD_DATA;
                }
                if (mEnablePadding != PaddingEnabled::ENABLED && mBufferSize == BufferSize::MORE) {
                    return Result::BAD_DATA;
                }
                break;
            case SpecificationLevel::UNSPECIFIED_TYPE:
                if (kLevel0_Model == SpecificationLevel::UNSPECIFIED_DIM ||
                    kLevel0_Model == SpecificationLevel::UNSPECIFIED_RANK) {
                    return Result::BAD_DATA;
                }
                if (mBufferSize == BufferSize::LESS) {
                    return Result::BAD_DATA;
                }
                if (mEnablePadding != PaddingEnabled::ENABLED && mBufferSize == BufferSize::MORE) {
                    return Result::BAD_DATA;
                }
                // This is the case when the dimension is incorrectly specified in the model.
                // With the incorrect dimension, the needed size is 2 * 3 = 6 times the data type
                // size. BufferSize::EQUAL (2 * 2 = 4 times the data type size) cannot provide
                // enough length.
                if (mBadIndex == kIndex0_Model && mBufferSize == BufferSize::EQUAL) {
                    return Result::BAD_DATA;
                }
                break;
            default:
                break;
        }
        return Result::NO_ERROR;
    }

    // Expect BAD_DATA on input1 for the following cases
    // - the provided type is less detailed than the type set at model construction time
    Result expectSetInput1() {
        const auto kLevel3_Model = mSpecificationLevels[kIndex3_Model];
        const auto kLevel3_Execution = mSpecificationLevels[kIndex3_Execution];
        switch (kLevel3_Execution) {
            case SpecificationLevel::UNSPECIFIED_DIM:
                if (kLevel3_Model == SpecificationLevel::FULLY_SPECIFIED) {
                    return Result::BAD_DATA;
                }
                break;
            case SpecificationLevel::UNSPECIFIED_RANK:
                if (kLevel3_Model != SpecificationLevel::UNSPECIFIED_RANK) {
                    return Result::BAD_DATA;
                }
                break;
            default:
                break;
        }
        return Result::NO_ERROR;
    }

    // Expect BAD_DATA on output0 for the following cases
    // - the provided type is less detailed than the type set at model construction time
    // - the provided type does not agree with the type set at model construction time
    // - the buffer size (length) is less than needed
    // - the buffer size (length) is more than needed and padding is not enabled
    Result expectSetOutput0() {
        const auto kLevel4_Model = mSpecificationLevels[kIndex4_Model];
        const auto kLevel4_Execution = mSpecificationLevels[kIndex4_Execution];
        switch (kLevel4_Execution) {
            case SpecificationLevel::UNSPECIFIED_DIM:
                if (kLevel4_Model == SpecificationLevel::FULLY_SPECIFIED ||
                    (kLevel4_Model == SpecificationLevel::UNSPECIFIED_DIM &&
                     (mBadIndex == kIndex4_Model || mBadIndex == kIndex4_Execution))) {
                    return Result::BAD_DATA;
                }
                break;
            case SpecificationLevel::UNSPECIFIED_RANK:
                if (kLevel4_Model != SpecificationLevel::UNSPECIFIED_RANK) {
                    return Result::BAD_DATA;
                }
                break;
            case SpecificationLevel::FULLY_SPECIFIED:
                if ((mBadIndex == kIndex4_Model || mBadIndex == kIndex4_Execution) &&
                    kLevel4_Model != SpecificationLevel::UNSPECIFIED_RANK) {
                    return Result::BAD_DATA;
                }
                if (mBufferSize == BufferSize::LESS) {
                    return Result::BAD_DATA;
                }
                if (mEnablePadding != PaddingEnabled::ENABLED && mBufferSize == BufferSize::MORE) {
                    return Result::BAD_DATA;
                }
                break;
            case SpecificationLevel::UNSPECIFIED_TYPE:
                if (kLevel4_Model == SpecificationLevel::FULLY_SPECIFIED) {
                    if (mBufferSize == BufferSize::LESS) {
                        return Result::BAD_DATA;
                    }
                    if (mEnablePadding != PaddingEnabled::ENABLED &&
                        mBufferSize == BufferSize::MORE) {
                        return Result::BAD_DATA;
                    }
                    // This is the case when the dimension is incorrectly specified in the model.
                    // With the incorrect dimension, the needed size is 2 * 3 = 6 times the data
                    // type size. BufferSize::EQUAL (2 * 2 = 4 times the data type size) cannot
                    // provide enough length.
                    if (mBadIndex == kIndex4_Model && mBufferSize == BufferSize::EQUAL) {
                        return Result::BAD_DATA;
                    }
                }
                break;
            default:
                break;
        }
        return Result::NO_ERROR;
    }

    // Expect failure for the following cases
    // - one of the operands has a bad dimension -> OP_FAILED
    // - insufficient output buffer -> OUTPUT_INSUFFICIENT_SIZE
    Result expectCompute() {
        if (mBadIndex < kIndexCount) {
            return Result::OP_FAILED;
        } else if (mBufferSize == BufferSize::LESS) {
            return Result::OUTPUT_INSUFFICIENT_SIZE;
        }
        return Result::NO_ERROR;
    }

    // Iterate over combinations of
    // - mBadIndexChoices: which operand has an incorrect dimension
    // - mOperandLocationChoices: where the operand resides, buffer or shared memory
    // - mBufferSizeChoices: whether the provided buffer/memory size is sufficient
    // - mEnablePaddingChoices: whether input/output memory padding is enabled
    template <typename T, Type TensorType>
    void TestAll() {
        SCOPED_TRACE("Model: " + toString(kSpecificationLevelModel));
        SCOPED_TRACE("Execution: " + toString(kSpecificationLevelExecution));
        mOperandTypes = {TensorType, TensorType, TensorType, Type::TENSOR_INT32,
                         TensorType, TensorType, Type::TENSOR_INT32, TensorType};
        for (const auto kBadIndex : mBadIndexChoices) {
            mBadIndex = kBadIndex;
            SCOPED_TRACE("Bad Index: " + std::to_string(mBadIndex));
            if (mBadIndex < kIndexCount &&
                (mSpecificationLevels[mBadIndex] == SpecificationLevel::UNSPECIFIED_RANK ||
                 mSpecificationLevels[mBadIndex] == SpecificationLevel::UNSPECIFIED_TYPE)) {
                continue;
            }
            for (const auto kOperandLocation : mOperandLocationChoices) {
                mOperandLocation = kOperandLocation;
                SCOPED_TRACE("Operand Location: " + toString(mOperandLocation));
                for (const auto kBufferSize : mBufferSizeChoices) {
                    mBufferSize = kBufferSize;
                    SCOPED_TRACE("Buffer Size: " + toString(mBufferSize));
                    for (const auto kEnablePadding : mEnablePaddingChoices) {
                        mEnablePadding = kEnablePadding;
                        SCOPED_TRACE("Enable Padding: " + toString(mEnablePadding));
                        TestOne<T, TensorType>();
                    }
                }
            }
        }
    }

    const UnspecifiedOperand kUnspecifiedOperand = std::get<0>(GetParam());
    const SpecificationLevel kSpecificationLevelModel = std::get<1>(GetParam());
    const SpecificationLevel kSpecificationLevelExecution = std::get<2>(GetParam());

    std::vector<SpecificationLevel> mSpecificationLevels;
    std::vector<Type> mOperandTypes;
    OptionalType mOptionalType = OptionalType::CONST;

    // Iterate all combinations in TestAll()
    std::vector<uint32_t> mBadIndexChoices;
    std::vector<OperandLocation> mOperandLocationChoices;
    std::vector<BufferSize> mBufferSizeChoices = {BufferSize::EQUAL};
    std::vector<PaddingEnabled> mEnablePaddingChoices = {PaddingEnabled::DEFAULT};

    uint32_t mBadIndex;
    OperandLocation mOperandLocation;
    BufferSize mBufferSize;
    PaddingEnabled mEnablePadding;
};

TEST_P(UnspecifiedDimensionsTest, Float32) {
    TestAll<float, Type::TENSOR_FLOAT32>();
}

TEST_P(UnspecifiedDimensionsTest, Quant8) {
    TestAll<uint8_t, Type::TENSOR_QUANT8_ASYMM>();
}

TEST_P(UnspecifiedDimensionsTest, Float16) {
    TestAll<_Float16, Type::TENSOR_FLOAT16>();
}


static const auto kAllSpecificationLevelsModel =
        testing::Values(SpecificationLevel::FULLY_SPECIFIED, SpecificationLevel::UNSPECIFIED_DIM,
                        SpecificationLevel::UNSPECIFIED_RANK);
static const auto kAllSpecificationLevelsExecution =
        testing::Values(SpecificationLevel::FULLY_SPECIFIED, SpecificationLevel::UNSPECIFIED_DIM,
                        SpecificationLevel::UNSPECIFIED_RANK, SpecificationLevel::UNSPECIFIED_TYPE);
static const auto kFullySpecified = testing::Values(SpecificationLevel::FULLY_SPECIFIED);
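// Constants and temporary variables cannot be set at execution time, so the corresponding test
// suites below only combine the model-time levels with kFullySpecified as the execution-time
// level.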

INSTANTIATE_TEST_SUITE_P(ModelInputTest, UnspecifiedDimensionsTest,
                         testing::Combine(testing::Values(UnspecifiedOperand::INPUT_MANDATORY),
                                          kAllSpecificationLevelsModel,
                                          kAllSpecificationLevelsExecution));

INSTANTIATE_TEST_SUITE_P(ConstantParameterTest, UnspecifiedDimensionsTest,
                         testing::Combine(testing::Values(UnspecifiedOperand::CONST_MANDATORY),
                                          kAllSpecificationLevelsModel, kFullySpecified));

INSTANTIATE_TEST_SUITE_P(TemporaryVariableTest, UnspecifiedDimensionsTest,
                         testing::Combine(testing::Values(UnspecifiedOperand::TEMPORARY_VARIABLE),
                                          kAllSpecificationLevelsModel, kFullySpecified));

INSTANTIATE_TEST_SUITE_P(OptionalConstantTest, UnspecifiedDimensionsTest,
                         testing::Combine(testing::Values(UnspecifiedOperand::CONST_OPTIONAL),
                                          kAllSpecificationLevelsModel, kFullySpecified));

INSTANTIATE_TEST_SUITE_P(OptionalInputTest, UnspecifiedDimensionsTest,
                         testing::Combine(testing::Values(UnspecifiedOperand::INPUT_OPTIONAL),
                                          kAllSpecificationLevelsModel,
                                          kAllSpecificationLevelsExecution));

INSTANTIATE_TEST_SUITE_P(ModelOutputTest, UnspecifiedDimensionsTest,
                         testing::Combine(testing::Values(UnspecifiedOperand::OUTPUT),
                                          kAllSpecificationLevelsModel,
                                          kAllSpecificationLevelsExecution));

}  // end namespace