/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include <algorithm>
#include <iterator>
#include <memory>
#include <mutex>
#include <vector>

#include "LegacyUtils.h"
#include "OperationResolver.h"
#include "Operations.h"
#include "OperationsUtils.h"
#include "Tracing.h"

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h>
#include <tensorflow/lite/kernels/internal/reference/integer_ops/conv.h>
#include <tensorflow/lite/kernels/internal/types.h>

#include "CpuOperationUtils.h"
#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

namespace android {
namespace nn {
namespace conv_2d {

constexpr char kOperationName[] = "CONV_2D";

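// The number of inputs encodes the parameterization (see Conv2dParam::initialize() below):
// 7 inputs use implicit padding; 8 adds the NCHW layout flag; 10 is either implicit padding
// with layout and dilation factors, or explicit padding without layout; 11 is explicit
// padding with layout; 13 adds the two dilation factors to that.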
constexpr uint32_t kNumInputsArray[] = {7, 8, 10, 11, 13};
constexpr uint32_t kInputTensor = 0;
constexpr uint32_t kFilterTensor = 1;
constexpr uint32_t kBiasTensor = 2;

constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;

namespace {

// If possible, we use this static buffer as the scratch space for the im2col tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];

// executionMutex is used to protect concurrent access to static_scratch_buffer
// and other non-thread-safe resources like gemmlowp::GemmContext.
// std::mutex is safe for pthreads on Android.
std::mutex executionMutex;

struct Conv2dParam {
    int32_t padding_left, padding_right;
    int32_t padding_top, padding_bottom;
    int32_t stride_width, stride_height;
    int32_t dilation_width_factor = 1, dilation_height_factor = 1;
    int32_t activation;
    bool useNchw = false;

    bool initialize(const IOperationExecutionContext* context) {
        uint32_t inCount = context->getNumInputs();
        int32_t padding_implicit = 0;
        bool useImplicitPadding = false;
        if ((inCount >= 8 && context->getInputType(7) == OperandType::BOOL) || inCount == 7) {
            padding_implicit = context->getInputValue<int32_t>(3);
            stride_width = context->getInputValue<int32_t>(4);
            stride_height = context->getInputValue<int32_t>(5);
            activation = context->getInputValue<int32_t>(6);
            if (inCount >= 8) {
                useNchw = context->getInputValue<bool>(7);
            }
            if (inCount == 10) {
                dilation_width_factor = context->getInputValue<int32_t>(8);
                dilation_height_factor = context->getInputValue<int32_t>(9);
            }
            useImplicitPadding = true;
        } else if (inCount >= 10 && context->getInputType(7) == OperandType::INT32) {
            padding_left = context->getInputValue<int32_t>(3);
            padding_right = context->getInputValue<int32_t>(4);
            padding_top = context->getInputValue<int32_t>(5);
            padding_bottom = context->getInputValue<int32_t>(6);
            stride_width = context->getInputValue<int32_t>(7);
            stride_height = context->getInputValue<int32_t>(8);
            activation = context->getInputValue<int32_t>(9);
            if (inCount >= 11) {
                useNchw = context->getInputValue<bool>(10);
            }
            if (inCount == 13) {
                dilation_width_factor = context->getInputValue<int32_t>(11);
                dilation_height_factor = context->getInputValue<int32_t>(12);
            }
        } else {
            NN_RET_CHECK_FAIL() << "Unsupported input spec for operation " << kOperationName;
        }
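        // With implicit padding, padding_implicit carries the NNAPI padding scheme
        // (ANEURALNETWORKS_PADDING_SAME = 1, ANEURALNETWORKS_PADDING_VALID = 2), which
        // calculateExplicitPadding expands into explicit per-edge padding amounts from
        // the input size, stride, dilation, and filter size.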
        if (useImplicitPadding) {
            Shape inputShape = context->getInputShape(kInputTensor);
            Shape filterShape = context->getInputShape(kFilterTensor);
            int32_t input_width = getSizeOfDimension(inputShape, useNchw ? 3 : 2);
            int32_t input_height = getSizeOfDimension(inputShape, useNchw ? 2 : 1);
            int32_t filter_width = getSizeOfDimension(filterShape, 2);
            int32_t filter_height = getSizeOfDimension(filterShape, 1);
            calculateExplicitPadding(input_width, stride_width, dilation_width_factor,
                                     filter_width, padding_implicit, &padding_left,
                                     &padding_right);
            calculateExplicitPadding(input_height, stride_height, dilation_height_factor,
                                     filter_height, padding_implicit, &padding_top,
                                     &padding_bottom);
        }
        NN_RET_CHECK_GE(padding_left, 0);
        NN_RET_CHECK_GE(padding_right, 0);
        NN_RET_CHECK_GE(padding_top, 0);
        NN_RET_CHECK_GE(padding_bottom, 0);
        NN_RET_CHECK_GT(stride_width, 0);
        NN_RET_CHECK_GT(stride_height, 0);
        NN_RET_CHECK_GT(dilation_width_factor, 0);
        NN_RET_CHECK_GT(dilation_height_factor, 0);
        NN_RET_CHECK_GE(activation, 0);
        return true;
    }
};

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
#define ANDROID_NN_CONV_PARAMETERS(Type)                                          \
    uint32_t height = getSizeOfDimension(inputShape, 1);                          \
    uint32_t width = getSizeOfDimension(inputShape, 2);                           \
    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);                   \
    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);                    \
    uint32_t outHeight = getSizeOfDimension(outputShape, 1);                      \
    uint32_t outWidth = getSizeOfDimension(outputShape, 2);                       \
    uint32_t inDepth = getSizeOfDimension(inputShape, 3);                         \
                                                                                  \
    uint32_t paddingHeight = (uint32_t)padding_top;                               \
    uint32_t paddingWidth = (uint32_t)padding_left;                               \
                                                                                  \
    tflite::Dims<4> im2colDim;                                                    \
    im2colDim.sizes[3] = (int)getSizeOfDimension(outputShape, 0);                 \
    im2colDim.sizes[2] = (int)getSizeOfDimension(outputShape, 1);                 \
    im2colDim.sizes[1] = (int)getSizeOfDimension(outputShape, 2);                 \
    im2colDim.sizes[0] = (int)inDepth * filterHeight * filterWidth;               \
                                                                                  \
    im2colDim.strides[0] = 1;                                                     \
    for (int i = 1; i < 4; i++) {                                                 \
        im2colDim.strides[i] = im2colDim.strides[i - 1] * im2colDim.sizes[i - 1]; \
    }                                                                             \
                                                                                  \
    Type* im2colData = nullptr;                                                   \
    uint64_t im2colByteSize = sizeof(Type);                                       \
    std::unique_ptr<Type[]> im2colGuard;                                          \
    for (int i = 0; i < 4; i++) {                                                 \
        im2colByteSize *= im2colDim.sizes[i];                                     \
    }                                                                             \
    /* http://b/77982879, tflite::optimized_ops::Conv uses int for offsets */     \
    if (im2colByteSize >= 0x7fffffff) {                                           \
        LOG(ERROR) << "Conv size is too large, not enough memory";                \
        return false;                                                             \
    }                                                                             \
    if (im2colByteSize <= kStaticBufferSize) {                                    \
        im2colData = reinterpret_cast<Type*>(static_scratch_buffer);              \
    } else {                                                                      \
        im2colData = new (std::nothrow) Type[im2colByteSize / sizeof(Type)];      \
        if (im2colData == nullptr) {                                              \
            LOG(ERROR) << "Conv size is too large, not enough memory";            \
            return false;                                                         \
        }                                                                         \
        im2colGuard.reset(im2colData);                                            \
    }

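// im2col ("image to column") copies each overlapping input patch into a column of a scratch
// matrix so that the convolution reduces to a single matrix multiplication; the buffer
// allocated by ANDROID_NN_CONV_PARAMETERS above holds filterHeight * filterWidth * inDepth
// values per output element.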
bool needim2colData(const Shape& filterShape, int32_t stride_width, int32_t stride_height,
                    int32_t dilation_width_factor, int32_t dilation_height_factor) {
    // Within tflite::optimized_ops::Conv, the following tests are performed,
    // and in the case (!need_dilated_im2col && !need_im2col), the method does
    // not expect to receive im2col data. In debug mode this is asserted and
    // fails tests, so we need to perform the same check here in the caller. See:
    // tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h:2655
    const int filter_width = getSizeOfDimension(filterShape, 2);
    const int filter_height = getSizeOfDimension(filterShape, 1);
    const bool need_dilated_im2col = dilation_width_factor != 1 || dilation_height_factor != 1;
    const bool need_im2col =
            stride_width != 1 || stride_height != 1 || filter_width != 1 || filter_height != 1;
    return need_dilated_im2col || need_im2col;
}
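// E.g., a 1x1 filter with unit strides and no dilation reads each input element exactly
// once, so the kernel can index the input directly and no im2col buffer is needed.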

bool convNhwc(const float* inputData, const Shape& inputShape, const float* filterData,
              const Shape& filterShape, const float* biasData, const Shape& biasShape,
              int32_t padding_left, int32_t padding_right, int32_t padding_top,
              int32_t padding_bottom, int32_t stride_width, int32_t stride_height,
              int32_t dilation_width_factor, int32_t dilation_height_factor, int32_t activation,
              float* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("convFloat32");

    ANDROID_NN_CONV_PARAMETERS(float)

    float output_activation_min, output_activation_max;
    CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);

    // Prevent concurrent executions that may access the scratch buffer.
    std::unique_lock<std::mutex> lock(executionMutex);
    NNTRACE_COMP_SWITCH("optimized_ops::Conv");

    const bool need_im2colData = needim2colData(filterShape, stride_width, stride_height,
                                                dilation_width_factor, dilation_height_factor);

    tflite::optimized_ops::Conv(
            inputData, convertShapeToDims(inputShape), filterData, convertShapeToDims(filterShape),
            biasData, convertShapeToDims(biasShape), stride_width, stride_height,
            dilation_width_factor, dilation_height_factor, paddingWidth, paddingHeight,
            output_activation_min, output_activation_max, outputData,
            convertShapeToDims(outputShape), need_im2colData ? im2colData : nullptr, im2colDim);
    return true;
}

bool convNhwc(const uint8_t* inputData, const Shape& inputShape, const uint8_t* filterData,
              const Shape& filterShape, const int32_t* biasData, const Shape& biasShape,
              int32_t padding_left, int32_t padding_right, int32_t padding_top,
              int32_t padding_bottom, int32_t stride_width, int32_t stride_height,
              int32_t dilation_width_factor, int32_t dilation_height_factor, int32_t activation,
              uint8_t* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("convQuant8");

    ANDROID_NN_CONV_PARAMETERS(uint8_t)

    int32_t inputOffset = -inputShape.offset;
    int32_t filterOffset = -filterShape.offset;
    int32_t outputOffset = outputShape.offset;

    double real_multiplier = 0.0;
    int32_t output_multiplier = 0;
    int32_t output_shift = 0;
    int32_t output_activation_min = 0;
    int32_t output_activation_max = 0;

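    // The accumulator works in the product scale inputScale * filterScale;
    // GetQuantizedConvolutionMultipler computes the rescale factor
    // inputScale * filterScale / outputScale, and QuantizeMultiplier expresses it as a
    // 32-bit fixed-point multiplier plus a power-of-two exponent (negated here into a
    // right shift for the legacy kernel).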
    NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,
                                                  &real_multiplier));
    int exponent;
    NN_RET_CHECK(QuantizeMultiplier(real_multiplier, &output_multiplier, &exponent));
    output_shift = -exponent;
    CalculateActivationRangeUint8(activation, outputShape, &output_activation_min,
                                  &output_activation_max);

    static gemmlowp::GemmContext gemm_context;

    // Prevent concurrent executions that may access the scratch buffer and
    // gemm_context.
    std::unique_lock<std::mutex> lock(executionMutex);
    // Allow gemmlowp to decide automatically how many threads to use.
    gemm_context.set_max_num_threads(0);

    NNTRACE_COMP_SWITCH("optimized_ops::Conv");

    const bool need_im2colData = needim2colData(filterShape, stride_width, stride_height,
                                                dilation_width_factor, dilation_height_factor);

    tflite::optimized_ops::Conv(inputData, convertShapeToDims(inputShape), inputOffset, filterData,
                                convertShapeToDims(filterShape), filterOffset, biasData,
                                convertShapeToDims(biasShape), stride_width, stride_height,
                                dilation_width_factor, dilation_height_factor, paddingWidth,
                                paddingHeight, outputOffset, output_multiplier, output_shift,
                                output_activation_min, output_activation_max, outputData,
                                convertShapeToDims(outputShape),
                                need_im2colData ? im2colData : nullptr, im2colDim, &gemm_context);
    return true;
}

// Passing input, filter and output shapes by value, so that we can change the
// offsets without modifying the actual shapes.
bool convNhwc(const int8_t* inputData, Shape inputShape, const int8_t* filterData,
              Shape filterShape, const int32_t* biasData, const Shape& biasShape,
              int32_t padding_left, int32_t padding_right, int32_t padding_top,
              int32_t padding_bottom, int32_t stride_width, int32_t stride_height,
              int32_t dilation_width_factor, int32_t dilation_height_factor, int32_t activation,
              int8_t* outputData, Shape outputShape) {
    NNTRACE_TRANS("convQuant8");

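    // Signed asymmetric 8-bit quantization differs from the unsigned variant only in the
    // zero point, so shift every value and offset up by 128 and reuse the uint8 kernel.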
    std::vector<uint8_t> unsignedInput(getNumberOfElements(inputShape));
    convertInt8ToUInt8(inputData, &unsignedInput);
    inputShape.offset += 128;

    std::vector<uint8_t> unsignedFilter(getNumberOfElements(filterShape));
    convertInt8ToUInt8(filterData, &unsignedFilter);
    filterShape.offset += 128;

    std::vector<uint8_t> unsignedOutput(getNumberOfElements(outputShape));
    outputShape.offset += 128;

    NN_RET_CHECK(convNhwc(unsignedInput.data(), inputShape, unsignedFilter.data(), filterShape,
                          biasData, biasShape, padding_left, padding_right, padding_top,
                          padding_bottom, stride_width, stride_height, dilation_width_factor,
                          dilation_height_factor, activation, unsignedOutput.data(), outputShape));

    convertUInt8ToInt8(unsignedOutput, outputData);

    return true;
}

bool convNhwc(const _Float16* inputData, const Shape& inputShape, const _Float16* filterData,
              const Shape& filterShape, const _Float16* biasData, const Shape& biasShape,
              int32_t padding_left, int32_t padding_right, int32_t padding_top,
              int32_t padding_bottom, int32_t stride_width, int32_t stride_height,
              int32_t dilation_width_factor, int32_t dilation_height_factor, int32_t activation,
              _Float16* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("convFloat16");

    std::vector<float> inputData_float32(getNumberOfElements(inputShape));
    std::vector<float> filterData_float32(getNumberOfElements(filterShape));
    std::vector<float> biasData_float32(getNumberOfElements(biasShape));
    std::vector<float> outputData_float32(getNumberOfElements(outputShape));

    convertFloat16ToFloat32(inputData, &inputData_float32);
    convertFloat16ToFloat32(filterData, &filterData_float32);
    convertFloat16ToFloat32(biasData, &biasData_float32);

    NN_RET_CHECK(convNhwc(inputData_float32.data(), inputShape, filterData_float32.data(),
                          filterShape, biasData_float32.data(), biasShape, padding_left,
                          padding_right, padding_top, padding_bottom, stride_width, stride_height,
                          dilation_width_factor, dilation_height_factor, activation,
                          outputData_float32.data(), outputShape));
    convertFloat32ToFloat16(outputData_float32, outputData);

    return true;
}

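// Layout-agnostic entry point: when useNchw is set, InputWithLayout converts the input to
// NHWC and OutputWithLayout writes through an NHWC staging buffer that commit() converts
// back, so the NHWC kernels above can be reused for both layouts.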
template <typename T_Input, typename T_Filter, typename T_Bias>
bool conv(const T_Input* inputData, const Shape& inputShape, const T_Filter* filterData,
          const Shape& filterShape, const T_Bias* biasData, const Shape& biasShape,
          int32_t padding_left, int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
          int32_t stride_width, int32_t stride_height, int32_t dilation_width_factor,
          int32_t dilation_height_factor, int32_t activation, bool useNchw, T_Input* outputData,
          const Shape& outputShape) {
    InputWithLayout<T_Input> input(useNchw);
    OutputWithLayout<T_Input> output(useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(convNhwc(input.getNhwcBuffer(), input.getNhwcShape(), filterData, filterShape,
                          biasData, biasShape, padding_left, padding_right, padding_top,
                          padding_bottom, stride_width, stride_height, dilation_width_factor,
                          dilation_height_factor, activation, output.getNhwcBuffer(),
                          output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}

bool convQuant8PerChannelNhwc(const uint8_t* inputData, const Shape& inputShape,
                              const int8_t* filterData, const Shape& filterShape,
                              const float* filterScales, const int32_t* biasData,
                              const Shape& biasShape, int32_t paddingLeft, int32_t paddingRight,
                              int32_t paddingTop, int32_t paddingBottom, int32_t strideWidth,
                              int32_t strideHeight, int32_t dilationWidthFactor,
                              int32_t dilationHeightFactor, int32_t activation, uint8_t* outputData,
                              const Shape& outputShape) {
    NNTRACE_TRANS("convQuant8PerChannel");

    uint32_t numBatches = getSizeOfDimension(inputShape, 0);
    uint32_t inputHeight = getSizeOfDimension(inputShape, 1);
    uint32_t inputWidth = getSizeOfDimension(inputShape, 2);
    uint32_t inputDepth = getSizeOfDimension(inputShape, 3);
    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);
    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);
    uint32_t filterDepth = getSizeOfDimension(filterShape, 3);
    uint32_t outputHeight = getSizeOfDimension(outputShape, 1);
    uint32_t outputWidth = getSizeOfDimension(outputShape, 2);
    uint32_t outputDepth = getSizeOfDimension(outputShape, 3);

    int32_t inputOffset = -inputShape.offset;
    int32_t outputOffset = outputShape.offset;

    auto realMultiplier = std::vector<double>(outputDepth, 0.0);
    auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
    auto outputShift = std::vector<int32_t>(outputDepth, 0);

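    // With TENSOR_QUANT8_SYMM_PER_CHANNEL filters, each output channel has its own filter
    // scale, and NNAPI defines the effective bias scale for channel i as
    // inputScale * filterScales[i], so a separate requantization multiplier and shift are
    // derived per channel.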
    for (uint32_t i = 0; i < outputDepth; ++i) {
        Shape filterChannelShape = filterShape;
        filterChannelShape.scale = filterScales[i];
        Shape biasChannelShape = biasShape;
        biasChannelShape.scale = filterScales[i] * inputShape.scale;
        NN_RET_CHECK(GetQuantizedConvolutionMultipler(
                inputShape, filterChannelShape, biasChannelShape, outputShape, &realMultiplier[i]));
        int exponent;
        NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &exponent));
        outputShift[i] = -exponent;
    }

    int32_t output_activation_min = 0, output_activation_max = 0;
    CalculateActivationRangeUint8(activation, outputShape, &output_activation_min,
                                  &output_activation_max);
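    // Plain reference NHWC convolution: for each output element, accumulate
    // filter * (input + inputOffset) over the receptive field, add the bias, requantize with
    // the per-channel multiplier, then add the output offset and clamp to the activation range.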
    const uint8_t* inputBase = inputData;
    uint8_t* outPtr = outputData;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < outputHeight; h++) {
            for (uint32_t w = 0; w < outputWidth; w++) {
                const int8_t* filterBase = filterData;

                for (uint32_t d = 0; d < outputDepth; d++) {
                    int32_t wInputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                    int32_t hInputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;
                    int32_t sum = 0;

                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++) {
                            for (uint32_t k = 0; k < filterDepth; k++) {
                                int32_t hInput = hInputOrigin +
                                                 dilationHeightFactor * static_cast<int32_t>(i);
                                int32_t wInput = wInputOrigin +
                                                 dilationWidthFactor * static_cast<int32_t>(j);
                                uint32_t dInput = k;
                                if (hInput >= 0 && hInput < static_cast<int32_t>(inputHeight) &&
                                    wInput >= 0 && wInput < static_cast<int32_t>(inputWidth)) {
                                    uint32_t filterIndex =
                                            i * filterWidth * filterDepth + j * filterDepth + k;
                                    uint32_t inputIndex = hInput * inputWidth * inputDepth +
                                                          wInput * inputDepth + dInput;
                                    sum += (static_cast<int32_t>(filterBase[filterIndex])) *
                                           (static_cast<int32_t>(inputBase[inputIndex]) +
                                            inputOffset);
                                }
                            }
                        }
                    }
                    sum += biasData[d];
                    sum = tflite::MultiplyByQuantizedMultiplier(sum, outputMultiplier[d],
                                                                -outputShift[d]);
                    sum += outputOffset;
                    sum = std::max(std::min(sum, output_activation_max), output_activation_min);
                    outPtr[d] = static_cast<uint8_t>(sum);
                    filterBase += filterHeight * filterWidth * filterDepth;
                }
                outPtr += outputDepth;
            }
        }
        inputBase += inputHeight * inputWidth * inputDepth;
    }

    return true;
}

bool convQuant8PerChannelNhwc(const int8_t* inputData, const Shape& inputShape,
                              const int8_t* filterData, const Shape& filterShape,
                              const float* filterScales, const int32_t* biasData,
                              const Shape& biasShape, int32_t paddingLeft, int32_t paddingRight,
                              int32_t paddingTop, int32_t paddingBottom, int32_t strideWidth,
                              int32_t strideHeight, int32_t dilationWidthFactor,
                              int32_t dilationHeightFactor, int32_t activation, int8_t* outputData,
                              const Shape& outputShape) {
    NNTRACE_TRANS("convQuant8SignedPerChannel");

    uint32_t numBatches = getSizeOfDimension(inputShape, 0);
    uint32_t inputHeight = getSizeOfDimension(inputShape, 1);
    uint32_t inputWidth = getSizeOfDimension(inputShape, 2);
    uint32_t inputDepth = getSizeOfDimension(inputShape, 3);
    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);
    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);
    uint32_t filterDepth = getSizeOfDimension(filterShape, 3);
    uint32_t outputHeight = getSizeOfDimension(outputShape, 1);
    uint32_t outputWidth = getSizeOfDimension(outputShape, 2);
    uint32_t outputDepth = getSizeOfDimension(outputShape, 3);

    int32_t inputOffset = -inputShape.offset;
    int32_t outputOffset = outputShape.offset;

    auto realMultiplier = std::vector<double>(outputDepth, 0.0);
    auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
    auto outputShift = std::vector<int32_t>(outputDepth, 0);

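    // Unlike the uint8 path above, tflite::reference_integer_ops::ConvPerChannel takes the
    // raw exponent returned by QuantizeMultiplier (left-shift convention), so the shift is
    // stored here without negation.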
    for (uint32_t i = 0; i < outputDepth; ++i) {
        Shape filterChannelShape = filterShape;
        filterChannelShape.scale = filterScales[i];
        Shape biasChannelShape = biasShape;
        biasChannelShape.scale = filterScales[i] * inputShape.scale;
        NN_RET_CHECK(GetQuantizedConvolutionMultipler(
                inputShape, filterChannelShape, biasChannelShape, outputShape, &realMultiplier[i]));
        NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &outputShift[i]));
    }

    int32_t output_activation_min = 0, output_activation_max = 0;
    CalculateActivationRangeInt8(activation, outputShape, &output_activation_min,
                                 &output_activation_max);

    tflite::ConvParams convParams;
    convParams.input_offset = -inputShape.offset;
    convParams.output_offset = outputShape.offset;
    convParams.stride_height = strideHeight;
    convParams.stride_width = strideWidth;
    convParams.dilation_height_factor = dilationHeightFactor;
    convParams.dilation_width_factor = dilationWidthFactor;
    convParams.padding_values.height = paddingTop;
    convParams.padding_values.width = paddingLeft;
    convParams.quantized_activation_min = output_activation_min;
    convParams.quantized_activation_max = output_activation_max;

    NNTRACE_COMP_SWITCH("reference_integer_ops::ConvPerChannel");
    tflite::reference_integer_ops::ConvPerChannel(
            convParams, outputMultiplier.data(), outputShift.data(),
            convertShapeToTflshape(inputShape), inputData, convertShapeToTflshape(filterShape),
            filterData, convertShapeToTflshape(biasShape), biasData,
            convertShapeToTflshape(outputShape), outputData);
    return true;
}

template <typename T>
bool convQuant8PerChannel(const T* inputData, const Shape& inputShape, const int8_t* filterData,
                          const Shape& filterShape, const float* filterScales,
                          const int32_t* biasData, const Shape& biasShape, int32_t paddingLeft,
                          int32_t paddingRight, int32_t paddingTop, int32_t paddingBottom,
                          int32_t strideWidth, int32_t strideHeight, int32_t dilationWidthFactor,
                          int32_t dilationHeightFactor, int32_t activation, bool useNchw,
                          T* outputData, const Shape& outputShape) {
    InputWithLayout<T> input(useNchw);
    OutputWithLayout<T> output(useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(convQuant8PerChannelNhwc(
            input.getNhwcBuffer(), input.getNhwcShape(), filterData, filterShape, filterScales,
            biasData, biasShape, paddingLeft, paddingRight, paddingTop, paddingBottom, strideWidth,
            strideHeight, dilationWidthFactor, dilationHeightFactor, activation,
            output.getNhwcBuffer(), output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}

#undef ANDROID_NN_CONV_PARAMETERS
#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

}  // namespace

Result<Version> validate(const IOperationValidationContext* context) {
    const uint32_t numInputs = context->getNumInputs();
    NN_RET_CHECK(
            std::binary_search(std::begin(kNumInputsArray), std::end(kNumInputsArray), numInputs));
    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
    const auto inputRank = getNumberOfDimensions(context->getInputShape(kInputTensor));
    const auto filterRank = getNumberOfDimensions(context->getInputShape(kFilterTensor));
    if (inputRank != 0) {
        NN_RET_CHECK_EQ(inputRank, 4);
    }
    if (filterRank != 0) {
        NN_RET_CHECK_EQ(filterRank, 4);
    }
    auto inputCount = context->getNumInputs();
    auto inputType = context->getInputType(kInputTensor);
    auto filterType = context->getInputType(kFilterTensor);
    std::vector<OperandType> inExpectedTypes;
    if (inputType == OperandType::TENSOR_FLOAT32) {
        inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                           OperandType::TENSOR_FLOAT32, OperandType::INT32,
                           OperandType::INT32,          OperandType::INT32,
                           OperandType::INT32};
    } else if (inputType == OperandType::TENSOR_FLOAT16) {
        inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                           OperandType::TENSOR_FLOAT16, OperandType::INT32,
                           OperandType::INT32,          OperandType::INT32,
                           OperandType::INT32};
    } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
               inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        NN_RET_CHECK(filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
                     filterType == inputType)
                << "Unsupported filter tensor type for operation " << kOperationName;
        inExpectedTypes = {inputType,          filterType,         OperandType::TENSOR_INT32,
                           OperandType::INT32, OperandType::INT32, OperandType::INT32,
                           OperandType::INT32};

        if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>(
                                    context->getInputExtraParams(kFilterTensor))
                                    .channelDim,
                            0)
                    << "Unsupported filter tensor channel dimension for operation "
                    << kOperationName;
        }
    } else {
        NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << kOperationName;
    }

    // NeuralNetworks.h specifies that ANEURALNETWORKS_CONV_2D's output must
    // meet "outputScale > inputScale * filterScale" for the operand type
    // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM before API level 29. For other
    // operand types (e.g., ANEURALNETWORKS_TENSOR_FLOAT32), this constraint
    // does not apply, so by default the constraint is met.
    bool meetsQuantizedScaleConstraintBeforeV1_2 = true;
    if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
        const float inputScale = context->getInputShape(kInputTensor).scale;
        const float filterScale = context->getInputShape(kFilterTensor).scale;
        const float outputScale = context->getOutputShape(kOutputTensor).scale;
        meetsQuantizedScaleConstraintBeforeV1_2 = (outputScale > inputScale * filterScale);
    }

    bool withExplicitPadding = false;
    bool withLayout = false;
    bool withDilation = false;
    if (inputCount >= 8) {
        if (context->getInputType(7) == OperandType::INT32 && inputCount >= 10) {
            std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
            inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
                                   explicitScalarTypes.end());
            withExplicitPadding = true;
        }
        int inputOffset = withExplicitPadding ? 3 : 0;
        if (inputCount >= 8 + inputOffset) {
            inExpectedTypes.push_back(OperandType::BOOL);
            withLayout = true;
        }
        NN_RET_CHECK_NE(inputCount, 9 + inputOffset)
                << "Provided only one dilation factor value, two values are required for "
                << "operation " << kOperationName;
        if (inputCount == 10 + inputOffset) {
            inExpectedTypes.push_back(OperandType::INT32);
            inExpectedTypes.push_back(OperandType::INT32);
            withDilation = true;
        }
    }

    auto minSupportedVersion = Version::ANDROID_OC_MR1;
    if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        minSupportedVersion = Version::ANDROID_R;
    } else if (inputType == OperandType::TENSOR_FLOAT16 ||
               filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || withLayout ||
               withDilation || !meetsQuantizedScaleConstraintBeforeV1_2) {
        minSupportedVersion = Version::ANDROID_Q;
    } else {
        minSupportedVersion = Version::ANDROID_OC_MR1;
    }
    NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));
    NN_RET_CHECK(validateOutputTypes(context, {inputType}));
    return minSupportedVersion;
}

#ifdef NN_INCLUDE_CPU_IMPLEMENTATION
bool prepare(IOperationExecutionContext* context) {
    Shape input = context->getInputShape(kInputTensor);
    Shape filter = context->getInputShape(kFilterTensor);
    Shape bias = context->getInputShape(kBiasTensor);

    if (filter.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        NN_RET_CHECK(input.type == OperandType::TENSOR_QUANT8_ASYMM ||
                     input.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED);
    } else {
        NN_RET_CHECK(input.type == filter.type);
    }
    if (input.type == OperandType::TENSOR_QUANT8_ASYMM ||
        input.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32);
    } else {
        NN_RET_CHECK(input.type == bias.type);
    }
    NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);

    Conv2dParam param;
    NN_RET_CHECK(param.initialize(context));

    uint32_t batches = getSizeOfDimension(input, 0);
    uint32_t height = getSizeOfDimension(input, param.useNchw ? 2 : 1);
    uint32_t width = getSizeOfDimension(input, param.useNchw ? 3 : 2);
    uint32_t channels_in = getSizeOfDimension(input, param.useNchw ? 1 : 3);
    uint32_t channels_out = getSizeOfDimension(filter, 0);
    uint32_t filterHeight = getSizeOfDimension(filter, 1);
    uint32_t filterWidth = getSizeOfDimension(filter, 2);
    // Only batches can be zero.
    NN_RET_CHECK_EQ(channels_in, getSizeOfDimension(filter, 3));
    NN_RET_CHECK_EQ(channels_out, getSizeOfDimension(bias, 0));
    NN_RET_CHECK_GT(height, 0);
    NN_RET_CHECK_GT(width, 0);
    NN_RET_CHECK_GT(channels_in, 0);
    NN_RET_CHECK_GT(channels_out, 0);

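    // With dilation, filter tap i lands dilation_factor * i elements from the window origin,
    // so the window spans (filterDim - 1) * dilation_factor + 1 input elements. Requiring
    // the padding on each edge to be smaller than this effective filter size guarantees
    // every output element sees at least one real input element.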
    int32_t effectiveFilterWidth = (filterWidth - 1) * param.dilation_width_factor + 1;
    int32_t effectiveFilterHeight = (filterHeight - 1) * param.dilation_height_factor + 1;
    NN_RET_CHECK_GT(effectiveFilterWidth, param.padding_left);
    NN_RET_CHECK_GT(effectiveFilterWidth, param.padding_right);
    NN_RET_CHECK_GT(effectiveFilterHeight, param.padding_top);
    NN_RET_CHECK_GT(effectiveFilterHeight, param.padding_bottom);

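    // computeOutSize applies the standard convolution output-size formula, which (assuming
    // the OperationsUtils definition) evaluates to
    //     floor((inSize - effectiveFilterSize + paddingHead + paddingTail) / stride) + 1.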
    uint32_t outWidth =
            computeOutSize(width, filterWidth, param.stride_width, param.dilation_width_factor,
                           param.padding_left, param.padding_right);
    uint32_t outHeight =
            computeOutSize(height, filterHeight, param.stride_height, param.dilation_height_factor,
                           param.padding_top, param.padding_bottom);

    Shape output = context->getOutputShape(kOutputTensor);
    output.type = input.type;
    if (param.useNchw) {
        output.dimensions = {batches, channels_out, outHeight, outWidth};
    } else {
        output.dimensions = {batches, outHeight, outWidth, channels_out};
    }
    return context->setOutputShape(kOutputTensor, output);
}

bool execute(IOperationExecutionContext* context) {
    // Bypass execution in the case of zero-sized input.
    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;
    Conv2dParam param;
    NN_RET_CHECK(param.initialize(context));
    switch (context->getInputType(kInputTensor)) {
        case OperandType::TENSOR_FLOAT32:
            return conv(context->getInputBuffer<float>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<float>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        context->getInputBuffer<float>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param.padding_left,
                        param.padding_right, param.padding_top, param.padding_bottom,
                        param.stride_width, param.stride_height, param.dilation_width_factor,
                        param.dilation_height_factor, param.activation, param.useNchw,
                        context->getOutputBuffer<float>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_FLOAT16:
            return conv(context->getInputBuffer<_Float16>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<_Float16>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        context->getInputBuffer<_Float16>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param.padding_left,
                        param.padding_right, param.padding_top, param.padding_bottom,
                        param.stride_width, param.stride_height, param.dilation_width_factor,
                        param.dilation_height_factor, param.activation, param.useNchw,
                        context->getOutputBuffer<_Float16>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_QUANT8_ASYMM:
            if (context->getInputType(kFilterTensor) ==
                OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                return convQuant8PerChannel(
                        context->getInputBuffer<uint8_t>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<int8_t>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        std::get<Operand::SymmPerChannelQuantParams>(
                                context->getInputExtraParams(kFilterTensor))
                                .scales.data(),
                        context->getInputBuffer<int32_t>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param.padding_left,
                        param.padding_right, param.padding_top, param.padding_bottom,
                        param.stride_width, param.stride_height, param.dilation_width_factor,
                        param.dilation_height_factor, param.activation, param.useNchw,
                        context->getOutputBuffer<uint8_t>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
            } else if (context->getInputType(kFilterTensor) == OperandType::TENSOR_QUANT8_ASYMM) {
                return conv(context->getInputBuffer<uint8_t>(kInputTensor),
                            context->getInputShape(kInputTensor),
                            context->getInputBuffer<uint8_t>(kFilterTensor),
                            context->getInputShape(kFilterTensor),
                            context->getInputBuffer<int32_t>(kBiasTensor),
                            context->getInputShape(kBiasTensor), param.padding_left,
                            param.padding_right, param.padding_top, param.padding_bottom,
                            param.stride_width, param.stride_height, param.dilation_width_factor,
                            param.dilation_height_factor, param.activation, param.useNchw,
                            context->getOutputBuffer<uint8_t>(kOutputTensor),
                            context->getOutputShape(kOutputTensor));
            } else {
                NN_RET_CHECK_FAIL() << "Unsupported filter type for operation " << kOperationName;
            }
        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            if (context->getInputType(kFilterTensor) ==
                OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                return convQuant8PerChannel(
                        context->getInputBuffer<int8_t>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<int8_t>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        std::get<Operand::SymmPerChannelQuantParams>(
                                context->getInputExtraParams(kFilterTensor))
                                .scales.data(),
                        context->getInputBuffer<int32_t>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param.padding_left,
                        param.padding_right, param.padding_top, param.padding_bottom,
                        param.stride_width, param.stride_height, param.dilation_width_factor,
                        param.dilation_height_factor, param.activation, param.useNchw,
                        context->getOutputBuffer<int8_t>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
            } else if (context->getInputType(kFilterTensor) ==
                       OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                return conv(context->getInputBuffer<int8_t>(kInputTensor),
                            context->getInputShape(kInputTensor),
                            context->getInputBuffer<int8_t>(kFilterTensor),
                            context->getInputShape(kFilterTensor),
                            context->getInputBuffer<int32_t>(kBiasTensor),
                            context->getInputShape(kBiasTensor), param.padding_left,
                            param.padding_right, param.padding_top, param.padding_bottom,
                            param.stride_width, param.stride_height, param.dilation_width_factor,
                            param.dilation_height_factor, param.activation, param.useNchw,
                            context->getOutputBuffer<int8_t>(kOutputTensor),
                            context->getOutputShape(kOutputTensor));
            } else {
                NN_RET_CHECK_FAIL() << "Unsupported filter type for operation " << kOperationName;
            }
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
    }
}
#endif  // NN_INCLUDE_CPU_IMPLEMENTATION

}  // namespace conv_2d

NN_REGISTER_OPERATION(CONV_2D, conv_2d::kOperationName, conv_2d::validate, conv_2d::prepare,
                      conv_2d::execute, .allowZeroSizedInput = true);

}  // namespace nn
}  // namespace android