/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstring>
#include <memory>
#include <mutex>
#include <vector>

#include "OperationResolver.h"
#include "Tracing.h"

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#include <tensorflow/lite/kernels/internal/common.h>

#include "CpuOperationUtils.h"
#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

namespace android {
namespace nn {
namespace transpose_conv_2d {

constexpr char kOperationName[] = "TRANSPOSE_CONV_2D";

constexpr uint32_t kInputTensor = 0;
constexpr uint32_t kFilterTensor = 1;
constexpr uint32_t kBiasTensor = 2;

constexpr uint32_t kNumInputs1 = 9;
constexpr uint32_t kNumInputs2 = 11;
constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;

namespace {

// If possible we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
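// Note: 1605632 bytes = 224 * 224 * 32; the size appears to be chosen so that the int32
// accumulator for a typical 224x224 output feature map fits without a heap allocation.
// Larger outputs fall back to operator new below.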

// executionMutex is used to protect concurrent access to the static_scratch_buffer.
// std::mutex is safe for pthreads on Android.
std::mutex executionMutex;

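// TRANSPOSE_CONV_2D accepts two input signatures (see TransposeConv2dParam::initialize below):
//   9 inputs (implicit padding):  input, filter, bias, output shape tensor, implicit padding
//       scheme, stride width, stride height, activation, NCHW flag
//   11 inputs (explicit padding): input, filter, bias, padding left/right/top/bottom,
//       stride width, stride height, activation, NCHW flag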
struct TransposeConv2dParam {
    int32_t paddingLeft, paddingRight;
    int32_t paddingTop, paddingBottom;
    int32_t strideWidth, strideHeight;
    int32_t activation;
    bool useNchw = false;

    bool initialize(const IOperationExecutionContext* context) {
        uint32_t inCount = context->getNumInputs();
        int32_t paddingImplicit = 0;
        if (inCount == 9) {
            paddingImplicit = context->getInputValue<int32_t>(4);
            strideWidth = context->getInputValue<int32_t>(5);
            strideHeight = context->getInputValue<int32_t>(6);
            activation = context->getInputValue<int32_t>(7);
            useNchw = context->getInputValue<bool>(8);
            Shape filterShape = context->getInputShape(kFilterTensor);
            int32_t filterWidth = getSizeOfDimension(filterShape, 2);
            int32_t filterHeight = getSizeOfDimension(filterShape, 1);
            NN_RET_CHECK_EQ(getNumberOfDimensions(context->getInputShape(3)), 1);
            NN_RET_CHECK_EQ(getSizeOfDimension(context->getInputShape(3), 0), 4);
            const int32_t* outputShapeData = context->getInputBuffer<int32_t>(3);
            int32_t outputWidth = useNchw ? outputShapeData[3] : outputShapeData[2];
            int32_t outputHeight = useNchw ? outputShapeData[2] : outputShapeData[1];
            calculateExplicitPaddingTransposeConv(outputWidth, strideWidth, filterWidth,
                                                  paddingImplicit, &paddingLeft, &paddingRight);
            calculateExplicitPaddingTransposeConv(outputHeight, strideHeight, filterHeight,
                                                  paddingImplicit, &paddingTop, &paddingBottom);
        } else if (inCount == 11) {
            paddingLeft = context->getInputValue<int32_t>(3);
            paddingRight = context->getInputValue<int32_t>(4);
            paddingTop = context->getInputValue<int32_t>(5);
            paddingBottom = context->getInputValue<int32_t>(6);
            strideWidth = context->getInputValue<int32_t>(7);
            strideHeight = context->getInputValue<int32_t>(8);
            activation = context->getInputValue<int32_t>(9);
            useNchw = context->getInputValue<bool>(10);
        } else {
            NN_RET_CHECK_FAIL() << "Unsupported input spec for operation " << kOperationName;
        }
        // paddingRight and paddingBottom in transpose conv may be less than 0 to resolve the
        // ambiguous output shape issue in the case of stride > 1.
        NN_RET_CHECK_GE(paddingLeft, 0);
        NN_RET_CHECK_GE(paddingTop, 0);
        NN_RET_CHECK_GT(strideWidth, 0);
        NN_RET_CHECK_GT(strideHeight, 0);
        NN_RET_CHECK_GE(activation, 0);
        return true;
    }
};

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
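// Binds the NHWC dimensions of inputShape/filterShape/outputShape and the fields of `param`
// to local variables; the expansion site must have all four names in scope.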
#define ANDROID_NN_TRANSPOSE_CONV_PARAMETERS                                    \
    uint32_t numBatches = getSizeOfDimension(inputShape, 0);                    \
    uint32_t inputHeight = getSizeOfDimension(inputShape, 1);                   \
    uint32_t inputWidth = getSizeOfDimension(inputShape, 2);                    \
    uint32_t inputDepth = getSizeOfDimension(inputShape, 3);                    \
    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);                 \
    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);                  \
    uint32_t outputHeight = getSizeOfDimension(outputShape, 1);                 \
    uint32_t outputWidth = getSizeOfDimension(outputShape, 2);                  \
    uint32_t outputDepth = getSizeOfDimension(outputShape, 3);                  \
    int32_t paddingLeft = param.paddingLeft, paddingRight = param.paddingRight; \
    int32_t paddingTop = param.paddingTop, paddingBottom = param.paddingBottom; \
    int32_t strideWidth = param.strideWidth, strideHeight = param.strideHeight; \
    int32_t activation = param.activation;

bool transposeConvNhwc(const float* inputData, const Shape& inputShape, const float* filterData,
                       const Shape& filterShape, const float* biasData, const Shape& biasShape,
                       const TransposeConv2dParam& param, float* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvFloat32");
    ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

    float outputActivationMin = 0.0f, outputActivationMax = 0.0f;
    CalculateActivationRangeFloat(activation, &outputActivationMin, &outputActivationMax);

    memset(outputData, 0, getNumberOfElements(outputShape) * sizeof(float));

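    // Scatter pass: each input element at (b, h, w) is multiplied by the filter and accumulated
    // into the output window anchored at (h * strideHeight - paddingTop,
    // w * strideWidth - paddingLeft). This is the transpose of the usual gather-style convolution.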
    const float* inputBase = inputData;
    float* outputBase = outputData;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < inputHeight; h++) {
            for (uint32_t w = 0; w < inputWidth; w++) {
                int32_t wOutputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                int32_t hOutputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;

                const float* filterBase = filterData;
                for (uint32_t k = 0; k < outputDepth; k++) {
                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++, filterBase += inputDepth) {
                            int32_t hOutput = hOutputOrigin + static_cast<int32_t>(i);
                            int32_t wOutput = wOutputOrigin + static_cast<int32_t>(j);
                            if (hOutput >= 0 && hOutput < static_cast<int32_t>(outputHeight) &&
                                wOutput >= 0 && wOutput < static_cast<int32_t>(outputWidth)) {
                                for (uint32_t d = 0; d < inputDepth; d++) {
                                    uint32_t outputIndex = hOutput * outputWidth * outputDepth +
                                                           wOutput * outputDepth + k;
                                    outputBase[outputIndex] += inputBase[d] * filterBase[d];
                                }
                            }
                        }
                    }
                }

                inputBase += inputDepth;
            }
        }
        outputBase += outputHeight * outputWidth * outputDepth;
    }

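    // Second pass: add the per-channel bias and clamp to the fused activation range.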
    const uint32_t outerSize = numBatches * outputHeight * outputWidth;
    float* outPtr = outputData;
    for (uint32_t i = 0; i < outerSize; i++) {
        for (uint32_t d = 0; d < outputDepth; d++, outPtr++) {
            *outPtr += biasData[d];
            *outPtr = std::max(std::min(*outPtr, outputActivationMax), outputActivationMin);
        }
    }

    return true;
}

template <typename T>
bool transposeConvNhwc(const T* inputData, const Shape& inputShape, const T* filterData,
                       const Shape& filterShape, const int32_t* biasData, const Shape& biasShape,
                       const TransposeConv2dParam& param, T* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvQuant8");
    ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

    int32_t* tempBuffer = nullptr;
    std::unique_ptr<int32_t[]> bufferGuard;
    uint32_t tempBufferByteSize = getNumberOfElements(outputShape) * sizeof(int32_t);
    if (tempBufferByteSize <= kStaticBufferSize) {
        tempBuffer = reinterpret_cast<int32_t*>(static_scratch_buffer);
    } else {
        tempBuffer = new (std::nothrow) int32_t[tempBufferByteSize / sizeof(int32_t)];
        if (tempBuffer == nullptr) {
            LOG(ERROR) << "ConvTranspose size is too large, not enough memory";
            return false;
        }
        bufferGuard.reset(tempBuffer);
    }

    int32_t inputOffset = -inputShape.offset;
    int32_t filterOffset = -filterShape.offset;
    int32_t outputOffset = outputShape.offset;

    double realMultiplier = 0.0;
    int32_t outputMultiplier = 0;
    int32_t outputShift = 0;
    NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,
                                                  &realMultiplier));
    int exponent;
    NN_RET_CHECK(QuantizeMultiplier(realMultiplier, &outputMultiplier, &exponent));
    outputShift = -exponent;
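    // realMultiplier is (inputScale * filterScale) / outputScale; QuantizeMultiplier encodes it
    // as a fixed-point multiplier plus a power-of-two shift so the requantization below can be
    // done entirely in integer arithmetic.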

    int32_t outputActivationMin = 0, outputActivationMax = 0;
    CalculateActivationRange<T>(activation, outputShape, &outputActivationMin,
                                &outputActivationMax);

    // Prevent concurrent executions that may access the scratch buffer.
    std::unique_lock<std::mutex> lock(executionMutex);
    memset(tempBuffer, 0, tempBufferByteSize);
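    // Accumulate into int32 scratch space to avoid overflow; the accumulators are requantized
    // back to T in the final pass.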

    const T* inputPtr = inputData;
    int32_t* outputBase = tempBuffer;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < inputHeight; h++) {
            for (uint32_t w = 0; w < inputWidth; w++) {
                for (uint32_t d = 0; d < inputDepth; d++) {
                    int32_t wOutputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                    int32_t hOutputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;

                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++) {
                            for (uint32_t k = 0; k < outputDepth; k++) {
                                int32_t hOutput = hOutputOrigin + static_cast<int32_t>(i);
                                int32_t wOutput = wOutputOrigin + static_cast<int32_t>(j);
                                if (hOutput >= 0 && hOutput < static_cast<int32_t>(outputHeight) &&
                                    wOutput >= 0 && wOutput < static_cast<int32_t>(outputWidth)) {
                                    uint32_t filterIndex =
                                            k * filterHeight * filterWidth * inputDepth +
                                            i * filterWidth * inputDepth + j * inputDepth + d;
                                    uint32_t outputIndex = hOutput * outputWidth * outputDepth +
                                                           wOutput * outputDepth + k;
                                    outputBase[outputIndex] +=
                                            (static_cast<int32_t>(*inputPtr) + inputOffset) *
                                            (static_cast<int32_t>(filterData[filterIndex]) +
                                             filterOffset);
                                }
                            }
                        }
                    }

                    inputPtr++;
                }
            }
        }
        outputBase += outputHeight * outputWidth * outputDepth;
    }

    const uint32_t outerSize = numBatches * outputHeight * outputWidth;
    int32_t* bufferPtr = tempBuffer;
    T* outPtr = outputData;
    for (uint32_t i = 0; i < outerSize; i++) {
        for (uint32_t d = 0; d < outputDepth; d++, bufferPtr++, outPtr++) {
            int32_t outVal = *bufferPtr + biasData[d];
            outVal = tflite::MultiplyByQuantizedMultiplier(outVal, outputMultiplier, -outputShift);
            outVal += outputOffset;
            outVal = std::max(std::min(outVal, outputActivationMax), outputActivationMin);
            *outPtr = static_cast<T>(outVal);
        }
    }

    return true;
}

bool transposeConvNhwc(const _Float16* inputData, const Shape& inputShape,
                       const _Float16* filterData, const Shape& filterShape,
                       const _Float16* biasData, const Shape& biasShape,
                       const TransposeConv2dParam& param, _Float16* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvFloat16");
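    // The _Float16 kernel upcasts all operands to float, reuses the float kernel, and downcasts
    // the result.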
    std::vector<float> inputData_float32(getNumberOfElements(inputShape));
    std::vector<float> filterData_float32(getNumberOfElements(filterShape));
    std::vector<float> biasData_float32(getNumberOfElements(biasShape));
    std::vector<float> outputData_float32(getNumberOfElements(outputShape));

    convertFloat16ToFloat32(inputData, &inputData_float32);
    convertFloat16ToFloat32(filterData, &filterData_float32);
    convertFloat16ToFloat32(biasData, &biasData_float32);

    transposeConvNhwc(inputData_float32.data(), inputShape, filterData_float32.data(), filterShape,
                      biasData_float32.data(), biasShape, param, outputData_float32.data(),
                      outputShape);
    convertFloat32ToFloat16(outputData_float32, outputData);

    return true;
}

template <typename T_Input, typename T_Filter, typename T_Bias>
bool transposeConv(const T_Input* inputData, const Shape& inputShape, const T_Filter* filterData,
                   const Shape& filterShape, const T_Bias* biasData, const Shape& biasShape,
                   const TransposeConv2dParam& param, T_Input* outputData,
                   const Shape& outputShape) {
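    // InputWithLayout/OutputWithLayout (from CpuOperationUtils.h) take care of any NCHW <-> NHWC
    // conversion, so the NHWC kernels above serve both data layouts.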
    InputWithLayout<T_Input> input(param.useNchw);
    OutputWithLayout<T_Input> output(param.useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(transposeConvNhwc(input.getNhwcBuffer(), input.getNhwcShape(), filterData,
                                   filterShape, biasData, biasShape, param, output.getNhwcBuffer(),
                                   output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}

template <typename T>
bool transposeConvQuant8PerChannelNhwc(const T* inputData, const Shape& inputShape,
                                       const int8_t* filterData, const Shape& filterShape,
                                       const float* filterScales, const int32_t* biasData,
                                       const Shape& biasShape, const TransposeConv2dParam& param,
                                       T* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvQuant8PerChannel");
    ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

    int32_t* tempBuffer = nullptr;
    std::unique_ptr<int32_t[]> bufferGuard;
    uint32_t tempBufferByteSize = getNumberOfElements(outputShape) * sizeof(int32_t);
    if (tempBufferByteSize <= kStaticBufferSize) {
        tempBuffer = reinterpret_cast<int32_t*>(static_scratch_buffer);
    } else {
        tempBuffer = new (std::nothrow) int32_t[tempBufferByteSize / sizeof(int32_t)];
        if (tempBuffer == nullptr) {
            LOG(ERROR) << "ConvTranspose size is too large, not enough memory";
            return false;
        }
        bufferGuard.reset(tempBuffer);
    }

    int32_t inputOffset = -inputShape.offset;
    int32_t outputOffset = outputShape.offset;

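    // Per-channel quantization: each output channel k carries its own filter scale
    // filterScales[k] with a zero point of 0 (symmetric quantization), which is why a separate
    // requantization multiplier and shift are derived per channel and no filterOffset appears
    // in the accumulation below.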
    std::vector<double> realMultiplier(outputDepth, 0.0);
    std::vector<int32_t> outputMultiplier(outputDepth, 0);
    std::vector<int32_t> outputShift(outputDepth, 0);
    for (uint32_t i = 0; i < outputDepth; ++i) {
        Shape filterChannelShape = filterShape;
        filterChannelShape.scale = filterScales[i];
        Shape biasChannelShape = biasShape;
        biasChannelShape.scale = filterScales[i] * inputShape.scale;

        NN_RET_CHECK(GetQuantizedConvolutionMultipler(
                inputShape, filterChannelShape, biasChannelShape, outputShape, &realMultiplier[i]));
        int exponent;
        NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &exponent));
        outputShift[i] = -exponent;
    }

    int32_t outputActivationMin = 0, outputActivationMax = 0;
    CalculateActivationRange<T>(activation, outputShape, &outputActivationMin,
                                &outputActivationMax);

    // Prevent concurrent executions that may access the scratch buffer.
    std::unique_lock<std::mutex> lock(executionMutex);
    memset(tempBuffer, 0, tempBufferByteSize);

    const T* inputPtr = inputData;
    int32_t* outputBase = tempBuffer;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < inputHeight; h++) {
            for (uint32_t w = 0; w < inputWidth; w++) {
                for (uint32_t d = 0; d < inputDepth; d++) {
                    int32_t wOutputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                    int32_t hOutputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;

                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++) {
                            for (uint32_t k = 0; k < outputDepth; k++) {
                                int32_t hOutput = hOutputOrigin + static_cast<int32_t>(i);
                                int32_t wOutput = wOutputOrigin + static_cast<int32_t>(j);
                                if (hOutput >= 0 && hOutput < static_cast<int32_t>(outputHeight) &&
                                    wOutput >= 0 && wOutput < static_cast<int32_t>(outputWidth)) {
                                    uint32_t filterIndex =
                                            k * filterHeight * filterWidth * inputDepth +
                                            i * filterWidth * inputDepth + j * inputDepth + d;
                                    uint32_t outputIndex = hOutput * outputWidth * outputDepth +
                                                           wOutput * outputDepth + k;
                                    outputBase[outputIndex] +=
                                            (static_cast<int32_t>(*inputPtr) + inputOffset) *
                                            static_cast<int32_t>(filterData[filterIndex]);
                                }
                            }
                        }
                    }

                    inputPtr++;
                }
            }
        }
        outputBase += outputHeight * outputWidth * outputDepth;
    }

    const uint32_t outerSize = numBatches * outputHeight * outputWidth;
    int32_t* bufferPtr = tempBuffer;
    T* outPtr = outputData;
    for (uint32_t i = 0; i < outerSize; i++) {
        for (uint32_t d = 0; d < outputDepth; d++, bufferPtr++, outPtr++) {
            int32_t outVal = *bufferPtr + biasData[d];
            outVal = tflite::MultiplyByQuantizedMultiplier(outVal, outputMultiplier[d],
                                                           -outputShift[d]);
            outVal += outputOffset;
            outVal = std::max(std::min(outVal, outputActivationMax), outputActivationMin);
            *outPtr = static_cast<T>(outVal);
        }
    }

    return true;
}

template <typename T>
bool transposeConvQuant8PerChannel(const T* inputData, const Shape& inputShape,
                                   const int8_t* filterData, const Shape& filterShape,
                                   const float* filterScales, const int32_t* biasData,
                                   const Shape& biasShape, const TransposeConv2dParam& param,
                                   T* outputData, const Shape& outputShape) {
    InputWithLayout<T> input(param.useNchw);
    OutputWithLayout<T> output(param.useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(transposeConvQuant8PerChannelNhwc(
            input.getNhwcBuffer(), input.getNhwcShape(), filterData, filterShape, filterScales,
            biasData, biasShape, param, output.getNhwcBuffer(), output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}

#undef ANDROID_NN_TRANSPOSE_CONV_PARAMETERS
#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

}  // namespace

Result<Version> validate(const IOperationValidationContext* context) {
    const uint32_t inputCount = context->getNumInputs();
    NN_RET_CHECK(inputCount == kNumInputs1 || inputCount == kNumInputs2);
    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
    const auto inputType = context->getInputType(kInputTensor);
    const auto filterType = context->getInputType(kFilterTensor);
    std::vector<OperandType> inExpectedTypes;
    Version minSupportedVersion = Version::ANDROID_Q;
    if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {
        inExpectedTypes = {inputType, inputType, inputType};
    } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
               inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        NN_RET_CHECK(filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
                     filterType == inputType)
                << "Unsupported filter tensor type for operation " << kOperationName;
        if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
                                    context->getInputExtraParams(kFilterTensor))
                                    .channelDim,
                            0)
                    << "Unsupported filter tensor channel dimension for operation "
                    << kOperationName;
        }
        inExpectedTypes = {inputType, filterType, OperandType::TENSOR_INT32};
        if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
            minSupportedVersion = Version::ANDROID_R;
        }
    } else {
        NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << kOperationName;
    }

    std::vector<OperandType> argExpectedTypes;
    if (inputCount == 11) {
        argExpectedTypes = {OperandType::INT32, OperandType::INT32, OperandType::INT32,
                            OperandType::INT32, OperandType::INT32, OperandType::INT32,
                            OperandType::INT32, OperandType::BOOL};
    } else {
        argExpectedTypes = {OperandType::TENSOR_INT32, OperandType::INT32, OperandType::INT32,
                            OperandType::INT32, OperandType::INT32, OperandType::BOOL};
    }
    inExpectedTypes.insert(inExpectedTypes.end(), argExpectedTypes.begin(), argExpectedTypes.end());
    NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));
    NN_RET_CHECK(validateOutputTypes(context, {inputType}));
    return minSupportedVersion;
}

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
bool prepare(IOperationExecutionContext* context) {
    Shape input = context->getInputShape(kInputTensor);
    Shape filter = context->getInputShape(kFilterTensor);
    Shape bias = context->getInputShape(kBiasTensor);

    if (filter.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        NN_RET_CHECK(input.type == OperandType::TENSOR_QUANT8_ASYMM ||
                     input.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED);
    } else {
        NN_RET_CHECK(input.type == filter.type);
    }
    if (input.type == OperandType::TENSOR_QUANT8_ASYMM ||
        input.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32);
    } else {
        NN_RET_CHECK(input.type == bias.type);
    }
    NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);

    TransposeConv2dParam param;
    NN_RET_CHECK(param.initialize(context));

    uint32_t batches = getSizeOfDimension(input, 0);
    uint32_t height = getSizeOfDimension(input, param.useNchw ? 2 : 1);
    uint32_t width = getSizeOfDimension(input, param.useNchw ? 3 : 2);
    uint32_t channels_in = getSizeOfDimension(input, param.useNchw ? 1 : 3);
    uint32_t channels_out = getSizeOfDimension(filter, 0);
    uint32_t filterHeight = getSizeOfDimension(filter, 1);
    uint32_t filterWidth = getSizeOfDimension(filter, 2);
    // Only batches can be zero.
    NN_RET_CHECK_EQ(channels_in, getSizeOfDimension(filter, 3));
    NN_RET_CHECK_EQ(channels_out, getSizeOfDimension(bias, 0));
    NN_RET_CHECK_GT(height, 0);
    NN_RET_CHECK_GT(width, 0);
    NN_RET_CHECK_GT(channels_in, 0);
    NN_RET_CHECK_GT(channels_out, 0);
    NN_RET_CHECK_GT(filterWidth, 0);
    NN_RET_CHECK_GT(filterHeight, 0);

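    // For each spatial dimension, computeOutSizeTransposeConv is expected to implement the
    // transposed-convolution size relation:
    //   outSize = (inSize - 1) * stride + filterSize - paddingHead - paddingTail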
    uint32_t outWidth = computeOutSizeTransposeConv(width, filterWidth, param.strideWidth,
                                                    param.paddingLeft, param.paddingRight);
    uint32_t outHeight = computeOutSizeTransposeConv(height, filterHeight, param.strideHeight,
                                                     param.paddingTop, param.paddingBottom);
    NN_RET_CHECK_GT(outWidth, 0);
    NN_RET_CHECK_GT(outHeight, 0);

    Shape output = context->getOutputShape(kOutputTensor);
    output.type = input.type;
    if (param.useNchw) {
        output.dimensions = {batches, channels_out, outHeight, outWidth};
    } else {
        output.dimensions = {batches, outHeight, outWidth, channels_out};
    }
    return context->setOutputShape(kOutputTensor, output);
}

bool execute(IOperationExecutionContext* context) {
    // Bypass execution in the case of zero-sized input.
    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;
    TransposeConv2dParam param;
    NN_RET_CHECK(param.initialize(context));
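    // Dispatch on the input tensor type; quantized inputs additionally dispatch on whether the
    // filter is per-channel symmetric or per-tensor asymmetric.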
    switch (context->getInputType(kInputTensor)) {
        case OperandType::TENSOR_FLOAT32:
            return transposeConv(context->getInputBuffer<float>(kInputTensor),
                                 context->getInputShape(kInputTensor),
                                 context->getInputBuffer<float>(kFilterTensor),
                                 context->getInputShape(kFilterTensor),
                                 context->getInputBuffer<float>(kBiasTensor),
                                 context->getInputShape(kBiasTensor), param,
                                 context->getOutputBuffer<float>(kOutputTensor),
                                 context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_FLOAT16:
            return transposeConv(context->getInputBuffer<_Float16>(kInputTensor),
                                 context->getInputShape(kInputTensor),
                                 context->getInputBuffer<_Float16>(kFilterTensor),
                                 context->getInputShape(kFilterTensor),
                                 context->getInputBuffer<_Float16>(kBiasTensor),
                                 context->getInputShape(kBiasTensor), param,
                                 context->getOutputBuffer<_Float16>(kOutputTensor),
                                 context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_QUANT8_ASYMM:
            if (context->getInputType(kFilterTensor) ==
                OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                return transposeConvQuant8PerChannel(
                        context->getInputBuffer<uint8_t>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<int8_t>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        std::get<Operand::SymmPerChannelQuantParams>(
                                context->getInputExtraParams(kFilterTensor))
                                .scales.data(),
                        context->getInputBuffer<int32_t>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param,
                        context->getOutputBuffer<uint8_t>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
            } else if (context->getInputType(kFilterTensor) == OperandType::TENSOR_QUANT8_ASYMM) {
                return transposeConv(context->getInputBuffer<uint8_t>(kInputTensor),
                                     context->getInputShape(kInputTensor),
                                     context->getInputBuffer<uint8_t>(kFilterTensor),
                                     context->getInputShape(kFilterTensor),
                                     context->getInputBuffer<int32_t>(kBiasTensor),
                                     context->getInputShape(kBiasTensor), param,
                                     context->getOutputBuffer<uint8_t>(kOutputTensor),
                                     context->getOutputShape(kOutputTensor));
            } else {
                NN_RET_CHECK_FAIL() << "Unsupported filter type for operation " << kOperationName;
            }
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            if (context->getInputType(kFilterTensor) ==
                OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                return transposeConvQuant8PerChannel(
                        context->getInputBuffer<int8_t>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<int8_t>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        std::get<Operand::SymmPerChannelQuantParams>(
                                context->getInputExtraParams(kFilterTensor))
                                .scales.data(),
                        context->getInputBuffer<int32_t>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param,
                        context->getOutputBuffer<int8_t>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
            } else if (context->getInputType(kFilterTensor) ==
                       OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                return transposeConv(context->getInputBuffer<int8_t>(kInputTensor),
                                     context->getInputShape(kInputTensor),
                                     context->getInputBuffer<int8_t>(kFilterTensor),
                                     context->getInputShape(kFilterTensor),
                                     context->getInputBuffer<int32_t>(kBiasTensor),
                                     context->getInputShape(kBiasTensor), param,
                                     context->getOutputBuffer<int8_t>(kOutputTensor),
                                     context->getOutputShape(kOutputTensor));
            } else {
                NN_RET_CHECK_FAIL() << "Unsupported filter type for operation " << kOperationName;
            }
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
    }
}
#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

}  // namespace transpose_conv_2d

NN_REGISTER_OPERATION(TRANSPOSE_CONV_2D, transpose_conv_2d::kOperationName,
                      transpose_conv_2d::validate, transpose_conv_2d::prepare,
                      transpose_conv_2d::execute, .allowZeroSizedInput = true);

}  // namespace nn
}  // namespace android