1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERATION_TYPES_H
18 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERATION_TYPES_H
19 
20 namespace android::nn {
21 
22 /**
23  * Operation types.
24  *
25  * The type of an operation in a model.
26  */
27 enum class OperationType {
28     /**
29      * Adds two tensors, element-wise.
30      *
31      * Takes two input tensors of identical {@link OperandType} and compatible
32      * dimensions. The output is the sum of both input tensors, optionally
33      * modified by an activation function.
34      *
35      * Two dimensions are compatible when:
36      *     1. they are equal, or
37      *     2. one of them is 1
38      *
39      * The size of the output is the maximum size along each dimension of the
40      * input operands. It starts with the trailing dimensions, and works its
41      * way forward.
42      *
43      * Example:
44      *
45      *     input1.dimension = {4, 1, 2}
46      *     input2.dimension = {5, 4, 3, 1}
47      *     output.dimension = {5, 4, 3, 2}
48      *
49      * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
50      * dimension is only compatible with 0 or 1. The size of the output
51      * dimension is zero if either of corresponding input dimension is zero.
52      *
53      * Supported tensor {@link OperandType}:
54      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
55      * * {@link OperandType::TENSOR_FLOAT32}
56      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
57      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
58      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
59      *
60      * Supported tensor rank: up to 4
61      *
62      * Inputs:
63      * * 0: A tensor.
64      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
65      *      as input0.
66      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
67      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
68      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
69      * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
70      *      {@link FusedActivationFunc} values. Specifies the activation to
71      *      invoke on the result.
72      *      For a {@link OperandType::TENSOR_INT32} tensor,
73      *      the {@link FusedActivationFunc} must be "NONE".
74      *
75      * Outputs:
76      * * 0: The sum, a tensor of the same {@link OperandType} as input0.
77      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
78      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
79      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
80      */
81     ADD = 0,
82 
83     /**
84      * Performs a 2-D average pooling operation.
85      *
86      * The output dimensions are functions of the filter dimensions, stride, and
87      * padding.
88      *
89      * The values in the output tensor are computed as:
90      *
91      *     output[b, i, j, channel] =
92      *         sum_{di, dj}(
93      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
94      *         ) / sum(1)
95      *
96      * Supported tensor {@link OperandType}:
97      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
98      * * {@link OperandType::TENSOR_FLOAT32}
99      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
100      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
101      *
102      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
103      * With the default data layout NHWC, the data is stored in the order of:
104      * [batch, height, width, channels]. Alternatively, the data layout could
105      * be NCHW, the data storage order of: [batch, channels, height, width].
106      * NCHW is supported since HAL version 1.2.
107      *
108      * Both explicit padding and implicit padding are supported.
109      *
110      * Inputs (explicit padding):
111      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
112      *      the input.
113      *      Since HAL version 1.2, zero batches is supported for this tensor.
114      * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
115      *      the left, in the ‘width’ dimension.
116      * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
117      *      the right, in the ‘width’ dimension.
118      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
119      *      the top, in the ‘height’ dimension.
120      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
121      *      the bottom, in the ‘height’ dimension.
122      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
123      *      walking through input in the ‘width’ dimension.
124      * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
125      *      walking through input in the ‘height’ dimension.
126      * * 7: An {@link OperandType::INT32} scalar, specifying the filter
127      *      width.
128      * * 8: An {@link OperandType::INT32} scalar, specifying the filter
129      *      height.
130      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
131      *      {@link FusedActivationFunc} values. Specifies the activation to
132      *      invoke on the result.
133      * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
134      *       Set to true to specify NCHW data layout for input0 and output0.
135      *       Available since HAL version 1.2.
136      *
137      * Inputs (implicit padding):
138      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
139      *      the input.
140      *      Since HAL version 1.2, zero batches is supported for this tensor.
141      * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
142      *      padding scheme, has to be one of the
143      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
144      * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
145      *      walking through input in the ‘width’ dimension.
146      * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
147      *      walking through input in the ‘height’ dimension.
148      * * 4: An {@link OperandType::INT32} scalar, specifying the filter
149      *      width.
150      * * 5: An {@link OperandType::INT32} scalar, specifying the filter
151      *      height.
152      * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
153      *      {@link FusedActivationFunc} values. Specifies the activation to
154      *      invoke on the result.
155      * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
156      *      Set to true to specify NCHW data layout for input0 and output0.
157      *      Available since HAL version 1.2.
158      *
159      * Outputs:
160      * * 0: The output 4-D tensor, of shape
161      *      [batches, out_height, out_width, depth].
162      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
163      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
164      *      the scale and zeroPoint must be the same as input0.
165      */
166     AVERAGE_POOL_2D = 1,
167 
168     /**
169      * Concatenates the input tensors along the given dimension.
170      *
171      * The input tensors must have identical {@link OperandType} and the same
172      * dimensions except the dimension along the concatenation axis.
173      *
174      * Supported tensor {@link OperandType}:
175      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
176      * * {@link OperandType::TENSOR_FLOAT32}
177      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
178      *   (full support since HAL version 1.2, see the input section)
179      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
180      *
181      * Supported tensor rank: up to 4
182      *
183      * Inputs:
184      * * 0 ~ n-1: The list of n input tensors, of shape
185      *            [D0, D1, ..., Daxis(i), ..., Dm].
186      *            Before HAL version 1.2, all input tensors of
187      *            {@link OperandType::TENSOR_QUANT8_ASYMM}
188      *            must have the same scale and zeroPoint as the output tensor.
189      *            Input tensors of
190      *            {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
191      *            are allowed to have different scale and zeroPoint.
192      *            Since HAL version 1.2, zero-sized tensors are supported.
193      * * n: An {@link OperandType::INT32} scalar, specifying the
194      *      concatenation axis.
195      *
196      * Outputs:
197      * * 0: The output, a tensor of the same {@link OperandType} as the input
198      *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
199      *      Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
200      *      the scale and zeroPoint values can be different from
201      *      input tensors. Before HAL version 1.2 they have to be the same as for the
202      *      input tensors.
203      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
204      *      the scale and zeroPoint values can be different from input tensors.
205      */
206     CONCATENATION = 2,
207 
208     /**
209      * Performs a 2-D convolution operation.
210      *
211      * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
212      * batch of images, applying the filter to each window of each image of the
213      * appropriate size.
214      *
215      * The output dimensions are functions of the filter dimensions, stride, and
216      * padding.
217      *
218      * The values in the output tensor are computed as:
219      *
220      *     output[b, i, j, channel] =
221      *         sum_{di, dj, k} (
222      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
223      *             filter[channel, di, dj, k]
224      *         ) + bias[channel]
225      *
226      * Supported tensor {@link OperandType} configurations:
227      * * 32 bit floating point:
228      * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
229      *
230      * * Quantized:
231      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
232      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
233      * * * input.scale * filter.scale).
234      *
235      * Available since HAL version 1.2:
236      * * 16 bit floating point:
237      * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
238      *
239      * * Quantized with symmetric per channel quantization for the filter:
240      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
241      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
242      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
243      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
244      *
245      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
246      * With the default data layout NHWC, the data is stored in the order of:
247      * [batch, height, width, channels]. Alternatively, the data layout could
248      * be NCHW, the data storage order of: [batch, channels, height, width].
249      * NCHW is supported since HAL version 1.2.
250      *
251      * Both explicit padding and implicit padding are supported.
252      *
253      * Inputs (explicit padding):
254      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
255      *      specifying the input.
256      *      Since HAL version 1.2, zero batches is supported for this tensor.
257      * * 1: A 4-D tensor, of shape
258      *      [depth_out, filter_height, filter_width, depth_in], specifying the
259      *      filter.
260      *      For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
261      *      the channel dimension (SymmPerChannelQuantParams::channelDim)
262      *      must be set to 0.
263      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
264      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
265      *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
266      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
267      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
268      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
269      *      of 0 and bias_scale == input_scale * filter_scale.
270      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
271      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
272      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
273      *      bias_scale[i] = input_scale * filter_scale[i].
274      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
275      *      the left, in the ‘width’ dimension.
276      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
277      *      the right, in the ‘width’ dimension.
278      * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
279      *      the top, in the ‘height’ dimension.
280      * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
281      *      the bottom, in the ‘height’ dimension.
282      * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
283      *      walking through input in the ‘width’ dimension.
284      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
285      *      walking through input in the ‘height’ dimension.
286      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
287      *      {@link FusedActivationFunc} values. Specifies the activation to
288      *      invoke on the result.
289      * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
290      *      Set to true to specify NCHW data layout for input0 and output0.
291      *      Available since HAL version 1.2.
292      * * 11: An optional {@link OperandType::INT32} scalar, specifying the dilation
293      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
294      *      cells between each filter element on width dimension. If this input is set,
295      *      input 12 (dilation factor for height) must be specified as well.
296      *      Available since HAL version 1.2.
297      * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
298      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
299      *      cells between each filter element on height dimension. If this input is set,
300      *      input 11 (dilation factor for width) must be specified as well.
301      *      Available since HAL version 1.2.
302      *
303      * Inputs (implicit padding):
304      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
305      *      specifying the input.
306      *      Since HAL version 1.2, zero batches is supported for this tensor.
307      * * 1: A 4-D tensor, of shape
308      *      [depth_out, filter_height, filter_width, depth_in], specifying the
309      *      filter.
310      *      For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
311      *      the channel dimension (SymmPerChannelQuantParams::channelDim)
312      *      must be set to 0.
313      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
314      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
315      *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
316      *      type.
317      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
318      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
319      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
320      *      of 0 and bias_scale == input_scale * filter_scale.
321      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
322      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
323      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
324      *      bias_scale[i] = input_scale * filter_scale[i].
325      * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
326      *      padding scheme, has to be one of the
327      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
328      * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
329      *      walking through input in the ‘width’ dimension.
330      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
331      *      walking through input in the ‘height’ dimension.
332      * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
333      *      {@link FusedActivationFunc} values. Specifies the activation to
334      *      invoke on the result.
335      * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
336      *      Set to true to specify NCHW data layout for input0 and output0.
337      *      Available since HAL version 1.2.
338      * * 8: An optional {@link OperandType::INT32} scalar, specifying the dilation
339      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
340      *      cells between each filter element on width dimension. If this input is set,
341      *      input 9 (dilation factor for height) must be specified as well.
342      *      Available since HAL version 1.2.
343      * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
344      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
345      *      cells between each filter element on height dimension. If this input is set,
346      *      input 8 (dilation factor for width) must be specified as well.
347      *      Available since HAL version 1.2.
348      *
349      * Outputs:
350      * * 0: The output 4-D tensor, of shape
351      *      [batches, out_height, out_width, depth_out].
352      *      Before HAL version 1.2, for output tensor of
353      *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must
354      *      be satisfied: output_scale > input_scale * filter_scale
355      */
356     CONV_2D = 3,
357 
358     /**
359      * Performs a depthwise 2-D convolution operation.
360      *
361      * Given an input tensor of shape [batches, height, width, depth_in] and a
362      * filter tensor of shape [1, filter_height, filter_width, depth_out]
363      * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
364      * applies a different filter to each input channel (expanding from 1
365      * channel to channel_multiplier channels for each), then concatenates the
366      * results together.
367      *
368      * The output has depth_out = depth_in * depth_multiplier channels.
369      * The output dimensions are functions of the filter dimensions, stride, and
370      * padding.
371      *
372      * The values in the output tensor are computed as:
373      *
374      *     output[b, i, j, k * channel_multiplier + q] =
375      *         sum_{di, dj} (
376      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
377      *             filter[1, di, dj, k * channel_multiplier + q]
378      *         ) + bias[k * channel_multiplier + q]
379      *
380      * Supported tensor {@link OperandType} configurations:
381      * * 32 bit floating point:
382      * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
383      *
384      * * Quantized:
385      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
386      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
387      * * * input.scale * filter.scale).
388      *
389      * Available since HAL version 1.2:
390      * * 16 bit floating point:
391      * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
392      *
393      * * Quantized with symmetric per channel quantization for the filter:
394      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
395      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
396      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
397      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
398      *
399      * Available since HAL version 1.3:
400      * * Quantized signed (since HAL version 1.3):
401      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
402      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
403      * * * input.scale * filter.scale).
404      *
405      * * Quantized signed with filter symmetric per channel quantization
406      *   (since HAL version 1.3):
407      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
408      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
409      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
410      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
411      *
412      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
413      * With the default data layout NHWC, the data is stored in the order of:
414      * [batch, height, width, channels]. Alternatively, the data layout could
415      * be NCHW, the data storage order of: [batch, channels, height, width].
416      * NCHW is supported since HAL version 1.2.
417      *
418      * Both explicit padding and implicit padding are supported.
419      *
420      * Inputs (explicit padding):
421      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
422      *      specifying the input.
423      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
424      *      specifying the filter.
425      *      For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
426      *      the channel dimension (SymmPerChannelQuantParams::channelDim)
427      *      must be set to 3.
428      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
429      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
430      *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
431      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
432      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
433      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
434      *      of 0 and bias_scale == input_scale * filter_scale.
435      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
436      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
437      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
438      *      bias_scale[i] = input_scale * filter_scale[i].
439      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
440      *      the left, in the ‘width’ dimension.
441      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
442      *      the right, in the ‘width’ dimension.
443      * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
444      *      the top, in the ‘height’ dimension.
445      * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
446      *      the bottom, in the ‘height’ dimension.
447      * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
448      *      walking through input in the ‘width’ dimension.
449      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
450      *      walking through input in the ‘height’ dimension.
451      * * 9: An {@link OperandType::INT32} scalar, specifying the depthwise
452      *      multiplier.
453      * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
454      *       {@link FusedActivationFunc} values. Specifies the activation to
455      *       invoke on the result.
456      * * 11: An optional {@link OperandType::BOOL} scalar, default to false.
457      *       Set to true to specify NCHW data layout for input0 and output0.
458      *       Available since HAL version 1.2.
459      * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
460      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
461      *      cells between each filter element on width dimension. If this input is set,
462      *      input 13 (dilation factor for height) must be specified as well.
463      *      Available since HAL version 1.2.
464      * * 13: An optional {@link OperandType::INT32} scalar, specifying the dilation
465      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
466      *      cells between each filter element on height dimension. If this input is set,
467      *      input 12 (dilation factor for width) must be specified as well.
468      *      Available since HAL version 1.2.
469      *
470      * Inputs (implicit padding):
471      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
472      *      specifying the input.
473      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
474      *      specifying the filter.
475      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
476      *      tensor of type {@link OperandType::TENSOR_FLOAT32}
477      *      or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type.
478      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
479      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
480      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
481      *      of 0 and bias_scale == input_scale * filter_scale.
482      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
483      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
484      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
485      *      bias_scale[i] = input_scale * filter_scale[i].
486      * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
487      *      padding scheme, has to be one of the
488      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
489      * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
490      *      walking through input in the ‘width’ dimension.
491      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
492      *      walking through input in the ‘height’ dimension.
493      * * 6: An {@link OperandType::INT32} scalar, specifying the depthwise
494      *      multiplier.
495      * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
496      *      {@link FusedActivationFunc} values. Specifies the activation to
497      *      invoke on the result.
498      * * 8: An optional {@link OperandType::BOOL} scalar, default to false.
499      *      Set to true to specify NCHW data layout for input0 and output0.
500      *      Available since HAL version 1.2.
501      * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
502      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
503      *      cells between each filter element on width dimension. If this input is set,
504      *      input 10 (dilation factor for height) must be specified as well.
505      *      Available since HAL version 1.2.
506      * * 10: An optional {@link OperandType::INT32} scalar, specifying the dilation
507      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
508      *      cells between each filter element on height dimension. If this input is set,
509      *      input 9 (dilation factor for width) must be specified as well.
510      *      Available since HAL version 1.2.
511      *
512      * Outputs:
513      * * 0: The output 4-D tensor, of shape
514      *      [batches, out_height, out_width, depth_out]. Before HAL version 1.2, for
515      *      output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
516      *      the following condition must be satisfied:
517      *      output_scale > input_scale * filter_scale
518      */
519     DEPTHWISE_CONV_2D = 4,
520 
521     /**
522      * Rearranges data from depth into blocks of spatial data.
523      *
524      * More specifically, this op outputs a copy of the input tensor where
525      * values from the depth dimension are moved in spatial blocks to the height
526      * and width dimensions. The value block_size indicates the input block size
527      * and how the data is moved.
528      *
529      * Chunks of data of size block_size * block_size from depth are rearranged
530      * into non-overlapping blocks of size block_size x block_size.
531      *
532      * The width of the output tensor is input_depth * block_size, whereas the
533      * height is input_height * block_size. The depth of the input tensor must
534      * be divisible by block_size * block_size
535      *
536      * Supported tensor {@link OperandType}:
537      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
538      * * {@link OperandType::TENSOR_FLOAT32}
539      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
540      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
541      *
542      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
543      * With the default data layout NHWC, the data is stored in the order of:
544      * [batch, height, width, channels]. Alternatively, the data layout could
545      * be NCHW, the data storage order of: [batch, channels, height, width].
546      * NCHW is supported since HAL version 1.2.
547      *
548      * Inputs:
549      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
550      *      specifying the input.
551      * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
552      *      block_size must be >=1 and block_size * block_size must be a divisor
553      *      of the input depth.
554      * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
555      *      Set to true to specify NCHW data layout for input0 and output0.
556      *      Available since HAL version 1.2.
557      *
558      * Outputs:
559      * * 0: The output 4-D tensor, of shape [batch, height*block_size,
560      *      width*block_size, depth/(block_size*block_size)].
561      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
562      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
563      *      the scale and zeroPoint must be the same as input0.
564      */
565     DEPTH_TO_SPACE = 5,
566 
567     /**
568      * Dequantizes the input tensor.
569      *
570      * The formula is:
571      *
572      *     output = (input - zeroPoint) * scale.
573      *
574      * Supported input tensor {@link OperandType}:
575      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
576      * * {@link OperandType::TENSOR_QUANT8_SYMM} (since HAL version 1.2)
577      * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since HAL version 1.2)
578      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
579      *
580      * Supported output tensor {@link OperandType}:
581      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
582      * * {@link OperandType::TENSOR_FLOAT32}.
583      *
584      * Supported tensor rank: up to 4
585      *
586      * Inputs:
587      * * 0: A tensor.
588      *      Since HAL version 1.2, this tensor may be zero-sized.
589      *
590      * Outputs:
591      * * 0: A tensor with the same shape as input0.
592      */
593     DEQUANTIZE = 6,
594 
595     /**
596      * Looks up sub-tensors in the input tensor.
597      *
598      * This operator takes for input a tensor of values (Values) and
599      * a one-dimensional tensor of selection indices (Lookups).
600      * The output tensor is the concatenation of sub-tensors of Values as
601      * selected by Lookups.
602      *
603      * Think of Values as being sliced along its first dimension:
604      * The entries in Lookups select which slices are concatenated together
605      * to create the output tensor.
606      *
607      * For example, if Values has shape of [40, 200, 300] and
608      * Lookups has shape of [3], all three values found in Lookups are
609      * expected to be between 0 and 39. The resulting tensor must
610      * have shape of [3, 200, 300].
611      *
612      * If a value in Lookups is out of bounds, the operation must fail
613      * and an error must be reported.
614      *
615      * Supported value tensor {@link OperandType}:
616      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.3)
617      * * {@link OperandType::TENSOR_FLOAT32}
618      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.2)
619      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
620      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
621      *
622      * Supported value tensor rank: from 2
623      *
624      * Inputs:
625      * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}.
626      *      The values are indices into the first dimension of Values.
627      * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
628      *      extracted.
629      *
630      * Output:
631      * * 0: A n-D tensor with the same rank and shape as the Values
632      *      tensor, except for the first dimension which has the same size
633      *      as Lookups' only dimension.
634      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
635      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
636      *      the scale and zeroPoint must be the same as input1.
637      */
638     EMBEDDING_LOOKUP = 7,
639 
640     /**
641      * Computes element-wise floor() on the input tensor.
642      *
643      * Supported tensor {@link OperandType}:
644      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
645      * * {@link OperandType::TENSOR_FLOAT32}
646      *
647      * Supported tensor rank: up to 4
648      *
649      * Inputs:
650      * * 0: A tensor.
651      *
652      * Outputs:
653      * * 0: The output tensor, of the same {@link OperandType} and dimensions as
654      *      the input tensor.
655      */
656     FLOOR = 8,
657 
658     /**
659      * Denotes a fully (densely) connected layer, which connects all elements
660      * in the input tensor with each element in the output tensor.
661      *
662      * This layer implements the operation:
663      *
664      *     outputs = activation(inputs * weights’ + bias)
665      *
666      * Supported tensor {@link OperandType}:
667      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
668      * * {@link OperandType::TENSOR_FLOAT32}
669      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
670      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
671      *
672      * Supported tensor rank: up to 4.
673      *
674      * Inputs:
675      * * 0: A tensor of at least rank 2, specifying the input. If rank is
676      *      greater than 2, then it gets flattened to a 2-D Tensor. The
677      *      (flattened) 2-D Tensor is reshaped (if necessary) to
678      *      [batch_size, input_size], where "input_size" corresponds to the
679      *      number of inputs to the layer, matching the second dimension of
680      *      weights, and "batch_size" is calculated by dividing the number of
681      *      elements by "input_size".
682      *      Since HAL version 1.2, zero batch_size is supported for this tensor.
683      * * 1: A 2-D tensor, specifying the weights, of shape
684      *      [num_units, input_size], where "num_units" corresponds to the number
685      *      of output nodes.
686      * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
687      *      tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
688      *      also be of {@link OperandType::TENSOR_FLOAT32}.
689      *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
690      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
691      *      the bias should be of {@link OperandType::TENSOR_INT32},
692      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
693      * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
694      *      {@link FusedActivationFunc} values. Specifies the activation to
695      *      invoke on the result.
696      *
697      * Outputs:
698      * * 0: The output tensor, of shape [batch_size, num_units]. Before HAL version 1.2, for
699      *      output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
700      *      condition must be satisfied: output_scale > input_scale * filter_scale.
701      */
702     FULLY_CONNECTED = 9,
703 
704     /**
705      * Looks up sub-tensors in the input tensor using a key-value map.
706      *
707      * This operator takes for input a tensor of values (Values),
708      * a one-dimensional tensor of selection values (Lookups) and
709      * a one-dimensional tensor that maps these values to Values
710      * indexes. The output tensor is the concatenation of sub-tensors of
711      * Values as selected by Lookups via Keys.
712      *
713      * Think of Values as being sliced along its outer-most dimension.
714      * The output is a concatenation of selected slices, with one slice
715      * for each entry of Lookups. The slice selected is the one at the
716      * same index as the Maps entry that matches the value in Lookups.
717      *
718      * For a hit, the corresponding sub-tensor of Values is included
719      * in the Output tensor. For a miss, the corresponding sub-tensor in
720      * Output must have zero values.
721      *
722      * For example, if Values has shape of [40, 200, 300],
723      * Keys should have a shape of [40]. If Lookups tensor has shape
724      * of [3], three slices are being concatenated, so the resulting tensor
725      * must have the shape of [3, 200, 300]. If the first entry in Lookups
726      * has the value 123456, that value must be located in Keys tensor.
727      * If the sixth entry of Keys contains 123456, the sixth slice of Values
728      * must be selected. If no entry in Keys has 123456, a slice of zeroes
729      * must be concatenated.
730      *
731      * Supported value tensor {@link OperandType}:
732      * * {@link OperandType::TENSOR_FLOAT32}
733      * * {@link OperandType::TENSOR_INT32}
734      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
735      *
736      * Supported value tensor rank: from 2
737      *
738      * Inputs:
739      * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with
740      *      shape [ k ].
741      * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape
742      *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
743      *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
744      *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
745      *      ascending order.
746      * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
747      *      must be n.
748      *
749      * Outputs:
750      * * 0: Output. A tensor with shape [ k …].
751      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
752      *      the scale and zeroPoint must be the same as input2.
753      * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
754      *      hits (True) or not (False).
755      *      Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
756      *      and scale 1.0f.
757      *      A non-zero byte represents True, a hit. A zero indicates otherwise.
758      */
759     HASHTABLE_LOOKUP = 10,
760 
761     /**
762      * Applies L2 normalization along the axis dimension.
763      *
764      * The values in the output tensor are computed as:
765      *
766      *     output[batch, row, col, channel] =
767      *         input[batch, row, col, channel] /
768      *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
769      *
770      * By default the axis dimension is the last dimension of the input tensor.
771      *
772      * Supported tensor {@link OperandType}:
773      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
774      * * {@link OperandType::TENSOR_FLOAT32}
775      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
776      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
777      *
778      * Supported tensor rank: up to 4
779      * Tensors with rank less than 4 are only supported since HAL version 1.2.
780      *
781      * Inputs:
782      * * 0: An n-D tensor, specifying the tensor to be normalized.
783      * * 1: An optional {@link OperandType::INT32} scalar, default to -1,
784      *      specifying the dimension normalization would be performed on.
785      *      Negative index is used to specify axis from the end (e.g. -1 for
786      *      the last axis). Must be in the range [-n, n).
787      *      Available since HAL version 1.2.
788      *
789      * Outputs:
790      * * 0: A tensor of the same {@link OperandType} and same shape as input0.
791      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
792      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
793      *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
794      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
795      *
796      *      NOTE: Before HAL version 1.3, if the elements along an axis are all zeros,
797      *      the result is undefined. Since HAL version 1.3, if the elements along an axis
798      *      are all zeros, the result is logical zero.
799      */
800     L2_NORMALIZATION = 11,
801 
802     /**
803      * Performs an 2-D L2 pooling operation.
804      *
805      * The output dimensions are functions of the filter dimensions, stride, and
806      * padding.
807      *
808      * The values in the output tensor are computed as:
809      *
810      *     output[b, i, j, c] =
811      *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
812      *              sum(1))
813      *
814      * Supported tensor {@link OperandType}:
815      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
816      * * {@link OperandType::TENSOR_FLOAT32}
817      *
818      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
819      * With the default data layout NHWC, the data is stored in the order of:
820      * [batch, height, width, channels]. Alternatively, the data layout could
821      * be NCHW, the data storage order of: [batch, channels, height, width].
822      * NCHW is supported since HAL version 1.2.
823      *
824      * Both explicit padding and implicit padding are supported.
825      *
826      * Inputs (explicit padding):
827      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
828      *      the input.
829      *      Since HAL version 1.2, zero batches is supported for this tensor.
830      * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
831      *      the left, in the ‘width’ dimension.
832      * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
833      *      the right, in the ‘width’ dimension.
834      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
835      *      the top, in the ‘height’ dimension.
836      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
837      *      the bottom, in the ‘height’ dimension.
838      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
839      *      walking through input in the ‘width’ dimension.
840      * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
841      *      walking through input in the ‘height’ dimension.
842      * * 7: An {@link OperandType::INT32} scalar, specifying the filter
843      *      width.
844      * * 8: An {@link OperandType::INT32} scalar, specifying the filter
845      *      height.
846      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
847      *      {@link FusedActivationFunc} values. Specifies the activation to
848      *      invoke on the result.
849      * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
850      *       Set to true to specify NCHW data layout for input0 and output0.
851      *       Available since HAL version 1.2.
852      *
853      * Inputs (implicit padding):
854      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
855      *      the input.
856      *      Since HAL version 1.2, zero batches is supported for this tensor.
857      * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
858      *      padding scheme, has to be one of the
859      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
860      * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
861      *      walking through input in the ‘width’ dimension.
862      * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
863      *      walking through input in the ‘height’ dimension.
864      * * 4: An {@link OperandType::INT32} scalar, specifying the filter
865      *      width.
866      * * 5: An {@link OperandType::INT32} scalar, specifying the filter
867      *      height.
868      * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
869      *      {@link FusedActivationFunc} values. Specifies the activation to
870      *      invoke on the result.
871      * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
872      *      Set to true to specify NCHW data layout for input0 and output0.
873      *      Available since HAL version 1.2.
874      *
875      * Outputs:
876      * * 0: The output 4-D tensor, of shape
877      *      [batches, out_height, out_width, depth].
878      */
879     L2_POOL_2D = 12,
880 
881     /**
882      * Applies Local Response Normalization along the depth dimension.
883      *
884      * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
885      * last dimension), and each vector is normalized independently. Within a
886      * given vector, each component is divided by the weighted, squared sum of
887      * inputs within depth_radius.
888      *
889      * The output is calculated using this formula:
890      *
891      *     sqr_sum[a, b, c, d] = sum(
892      *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
893      *     output = input / pow((bias + alpha * sqr_sum), beta)
894      *
895      * For input tensor with rank less than 4, independently normalizes each
896      * 1-D slice along specified dimension.
897      *
898      * Supported tensor {@link OperandType}:
899      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
900      * * {@link OperandType::TENSOR_FLOAT32}
901      *
902      * Supported tensor rank: up to 4
903      * Tensors with rank less than 4 are only supported since HAL version 1.2.
904      *
905      * Inputs:
906      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
907      *      the input.
908      * * 1: An {@link OperandType::INT32} scalar, specifying the radius of
909      *      the normalization window.
910      * * 2: A scalar, specifying the bias, must not be zero.
911      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the bias
912      *      value must be of {@link OperandType::FLOAT16}.
913      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
914      *      value must be of {@link OperandType::FLOAT32}.
915      * * 3: A scalar, specifying the scale factor, alpha.
916      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
917      *      alpha value must be of {@link OperandType::FLOAT16}.
918      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
919      *      alpha value must be of {@link OperandType::FLOAT32}.
920      * * 4: A scalar, specifying the exponent, beta.
921      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
922      *      value must be of {@link OperandType::FLOAT16}.
923      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
924      *      value must be of {@link OperandType::FLOAT32}.
925      * * 5: An optional {@link OperandType::INT32} scalar, default to -1,
926      *      specifying the dimension normalization would be performed on.
927      *      Negative index is used to specify axis from the end (e.g. -1 for
928      *      the last axis). Must be in the range [-n, n).
929      *      Available since HAL version 1.2.
930      *
931      * Outputs:
932      * * 0: The output tensor of same shape as input0.
933      */
934     LOCAL_RESPONSE_NORMALIZATION = 13,
935 
936     /**
937      * Computes sigmoid activation on the input tensor element-wise.
938      *
939      * The output is calculated using this formula:
940      *
941      *     output = 1 / (1 + exp(-input))
942      *
943      * Supported tensor {@link OperandType}:
944      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
945      * * {@link OperandType::TENSOR_FLOAT32}
946      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
947      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
948      *
949      * Supported tensor rank: up to 4.
950      *
951      * Inputs:
952      * * 0: A tensor, specifying the input.
953      *      Since HAL version 1.2, this tensor may be zero-sized.
954      *
955      * Outputs:
956      * * 0: The output tensor of same shape as input0.
957      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
958      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
959      *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
960      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
961      */
962     LOGISTIC = 14,
963 
964     /**
965      * Projects an input to a bit vector via locality senstive hashing.
966      *
967      * Supported input tensor {@link OperandType}:
968      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
969      * * {@link OperandType::TENSOR_FLOAT32}
970      * * {@link OperandType::TENSOR_INT32}
971      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
972      *
973      * Supported input tensor rank: from 1
974      *
975      * Inputs:
976      * * 0: Hash functions. Dim.size == 2, DataType: Float.
977      *      Tensor[0].Dim[0]: Number of hash functions.
978      *      Tensor[0].Dim[1]: Number of projected output bits generated by each
979      *      hash function.
980      *      If the projection type is Sparse:
981      *      Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
982      *
983      * * 1: Input. Dim.size >= 1, no restriction on DataType.
984      * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
985      *      If not set, each input element is considered to have the same weight
986      *      of 1.0.
987      *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
988      * * 3: Type:
989      *        Sparse:
990      *          Value LSHProjectionType_SPARSE(=3) (since HAL version 1.2).
991      *          Computed bit vector is considered to be sparse.
992      *          Each output element is an int32 made up of multiple bits
993      *          computed from hash functions.
994      *
995      *          NOTE: To avoid collisions across hash functions, an offset value
996      *          of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
997      *          where k is the index of the hash function.
998      *
999      *          Value LSHProjectionType_SPARSE_DEPRECATED(=1).
1000      *          Legacy behavior that does not include the offset value.
1001      *
1002      *        Dense:
1003      *          Value LSHProjectionType_DENSE(=2).
1004      *          Computed bit vector is considered to be dense. Each output
1005      *          element represents a bit and can take the value of either
1006      *          0 or 1.
1007      *
1008      * Outputs:
1009      * * 0: If the projection type is Sparse:
1010      *      Output.Dim == { Tensor[0].Dim[0] }
1011      *      A tensor of int32 that represents hash signatures.
1012      *
1013      *      If the projection type is Dense:
1014      *      Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
1015      *      A flattened tensor that represents projected bit vectors.
1016      * The offset value for sparse projections was added in HAL version 1.2.
1017      */
1018     LSH_PROJECTION = 15,
1019 
1020     /**
1021      * Performs a single time step in a Long Short-Term Memory (LSTM) layer
1022      *
1023      * The LSTM operation is described by the following equations.
1024      *
1025      * \f{eqnarray*}{
1026      * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
1027      * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
1028      * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
1029      *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
1030      * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
1031      *      & & \\
1032      *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
1033      *      & if\ there\ is\ a\ projection; \\
1034      * h_t =& & \\
1035      *      & o_t \odot g(C_t) & otherwise. \\
1036      * \f}
1037      * Where:
1038      * * \f$x_t\f$ is the input,
1039      * * \f$i_t\f$ is the input gate,
1040      * * \f$f_t\f$ is the forget gate,
1041      * * \f$C_t\f$ is the cell state,
1042      * * \f$o_t\f$ is the output,
1043      * * \f$h_t\f$ is the output state,
1044      * * \f$\sigma\f$ is the logistic sigmoid function,
1045      * * \f$g\f$ is the cell input and cell output activation function, usually
1046      *   \f$tahn\f$,
1047      * * \f$W_{xi}\f$ is the input-to-input weight matrix,
1048      * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
1049      * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
1050      * * \f$b_i\f$ is the input gate bias,
1051      * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
1052      * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
1053      * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
1054      * * \f$b_f\f$ is the forget gate bias,
1055      * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
1056      * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
1057      * * \f$b_c\f$ is the cell bias,
1058      * * \f$W_{xo}\f$ is the input-to-output weight matrix,
1059      * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
1060      * * \f$W_{co}\f$ is the cell-to-output weight matrix,
1061      * * \f$b_o\f$ is the output gate bias,
1062      * * \f$W_{proj}\f$ is the projection weight matrix,
1063      * * \f$b_{proj}\f$ is the projection bias,
1064      * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
1065      * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
1066      * * \f$\odot\f$ is the
1067      *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
1068      *   Hadamard product</a> that takes two matrices and produces another
1069      *   matrix, each element of which is the product of the corresponding
1070      *   elements of the input matrices.
1071      *
1072      * Since HAL version 1.2 LSTM supports layer normalization.
1073      * In case layer normalization is used, the inputs to internal activation
1074      * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
1075      * following an approach from section 3.1 from
1076      * https://arxiv.org/pdf/1607.06450.pdf
1077      *
1078      * The operation has the following independently optional inputs:
1079      * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
1080      *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
1081      *   have values or neither of them have values (i.e., all set to null). If
1082      *   they have values, the peephole optimization is used.
1083      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
1084      *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
1085      *   or none of them have values. If they have no values, coupling of input
1086      *   and forget gates (CIFG) is used, in which case the input gate
1087      *   (\f$i_t\f$) is calculated using the following equation instead.
1088      *   \f{eqnarray*}{
1089      *   i_t = 1 - f_t
1090      *   \f}
1091      *   In case peephole optimization is used and CIFG is not used
1092      *   cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
1093      *   cell-to-input weights must have no value.
1094      * * The projection weights (\f$W_{proj}\f$) is required only for the
1095      *   recurrent projection layer, and should otherwise have no value.
1096      * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
1097      *   value if the recurrent projection layer exists, and should otherwise
1098      *   have no value.
1099      * * (HAL version 1.2 or later) The four layer normalization weights either all have
1100      *   values or none of them have values. Additionally, if CIFG is used,
1101      *   input layer normalization weights tensor is omitted and the other layer
1102      *   normalization weights either all have values or none of them have
1103      *   values. Layer normalization is used when the values of all the layer
1104      *   normalization weights are present.
1105      *
1106      * References:
1107      *
1108      * The default non-peephole non-CIFG implementation is based on:
1109      * http://www.bioinf.jku.at/publications/older/2604.pdf
1110      * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
1111      * Computation, 9(8):1735-1780, 1997.
1112      *
1113      * The peephole implementation and projection layer is based on:
1114      * https://research.google.com/pubs/archive/43905.pdf
1115      * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
1116      * recurrent neural network architectures for large scale acoustic
1117      * modeling." INTERSPEECH, 2014.
1118      * (However, the concept of peephole optimization was introduced in work
1119      * prior to this paper.)
1120      *
1121      * The coupling of input and forget gate (CIFG) is based on:
1122      * http://arxiv.org/pdf/1503.04069.pdf
1123      * Greff et al. "LSTM: A Search Space Odyssey"
1124      *
1125      * The layer normalization is based on:
1126      * https://arxiv.org/pdf/1607.06450.pdf
1127      * Jimmy Ba et al. "Layer Normalization"
1128      *
1129      * Supported tensor {@link OperandType}:
1130      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1131      * * {@link OperandType::TENSOR_FLOAT32}
1132      *
1133      * All input and output tensors must be of the same type.
1134      *
1135      * Inputs:
1136      * * 0: The input (\f$x_t\f$).
1137      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1138      *      corresponds to the batching dimension, and “input_size” is the size
1139      *      of the input.
1140      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
1141      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1142      *      corresponds to the number of cell units.
1143      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
1144      *      A 2-D tensor of shape [num_units, input_size].
1145      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
1146      *      A 2-D tensor of shape [num_units, input_size].
1147      * * 4: The input-to-output weights (\f$W_{xo}\f$).
1148      *      A 2-D tensor of shape [num_units, input_size].
1149      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
1150      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
1151      *      corresponds to either the number of cell units (i.e., “num_units”),
1152      *      or the second dimension of the “projection_weights”, if defined.
1153      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
1154      *      A 2-D tensor of shape [num_units, output_size].
1155      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
1156      *      A 2-D tensor of shape [num_units, output_size].
1157      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
1158      *      A 2-D tensor of shape [num_units, output_size].
1159      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
1160      *      A 1-D tensor of shape [num_units].
1161      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
1162      *      A 1-D tensor of shape [num_units].
1163      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
1164      *      A 1-D tensor of shape [num_units].
1165      * * 12:The input gate bias (\f$b_i\f$). Optional.
1166      *      A 1-D tensor of shape [num_units].
1167      * * 13:The forget gate bias (\f$b_f\f$).
1168      *      A 1-D tensor of shape [num_units].
1169      * * 14:The cell bias (\f$b_c\f$).
1170      *      A 1-D tensor of shape [num_units].
1171      * * 15:The output gate bias (\f$b_o\f$).
1172      *      A 1-D tensor of shape [num_units].
1173      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
1174      *      A 2-D tensor of shape [output_size, num_units].
1175      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
1176      *      A 1-D tensor of shape [output_size].
1177      * * 18:The output state (in) (\f$h_{t-1}\f$).
1178      *      A 2-D tensor of shape [batch_size, output_size].
1179      * * 19:The cell state (in) (\f$C_{t-1}\f$).
1180      *      A 2-D tensor of shape [batch_size, num_units].
1181      * * 20:The activation function (\f$g\f$).
1182      *      A value indicating the activation function:
1183      *      <ul>
1184      *      <li>0: None;
1185      *      <li>1: Relu;
1186      *      <li>3: Relu6;
1187      *      <li>4: Tanh;
1188      *      <li>6: Sigmoid.
1189      *      </ul>
1190      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
1191      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
1192      *      then clipping is disabled.
1193      *      Until HAL version 1.2 this scalar must be of type {@link
1194      *      OperandType::FLOAT32}. Since HAL version 1.2, if all the input
1195      *      tensors have type {@link OperandType::TENSOR_FLOAT32}, this
1196      *      scalar must be of the type {@link OperandType::FLOAT32},
1197      *      otherwise if all the input tensors have the type {@link
1198      *      OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
1199      *      OperandType::FLOAT16}.
1200      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
1201      *      projection layer, such that values are bound within
1202      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1203      *      Until HAL version 1.2 this scalar must be of type {@link
1204      *      OperandType::FLOAT32}. Since HAL version 1.2, if all the input
1205      *      tensors have type {@link OperandType::TENSOR_FLOAT32}, this
1206      *      scalar must be of the type {@link OperandType::FLOAT32},
1207      *      otherwise if all the input tensors have the type {@link
1208      *      OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
1209      *      OperandType::FLOAT16}.
1210      * Since HAL version 1.2 there are additional inputs to this op:
1211      * * 23:The input layer normalization weights.
1212      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1213      *      to activation at input gate.
1214      * * 24:The forget layer normalization weights.
1215      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1216      *      to activation at forget gate.
1217      * * 25:The cell layer normalization weights.
1218      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1219      *      to activation at cell gate.
1220      * * 26:The output layer normalization weights.
1221      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1222      *      to activation at output gate.
1223      *
1224      * Outputs:
1225      * * 0: The scratch buffer.
1226      *      A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
1227      *      [batch_size, num_units * 4] without CIFG.
1228      * * 1: The output state (out) (\f$h_t\f$).
1229      *      A 2-D tensor of shape [batch_size, output_size].
1230      * * 2: The cell state (out) (\f$C_t\f$).
1231      *      A 2-D tensor of shape [batch_size, num_units].
1232      * * 3: The output (\f$o_t\f$).
1233      *      A 2-D tensor of shape [batch_size, output_size]. This is effectively
1234      *      the same as the current “output state (out)” value.
1235      */
1236     LSTM = 16,
1237 
1238     /**
1239      * Performs an 2-D max pooling operation.
1240      *
1241      * The output dimensions are functions of the filter dimensions, stride, and
1242      * padding.
1243      *
1244      * The values in the output tensor are computed as:
1245      *
1246      *     output[b, i, j, channel] =
1247      *         max_{di, dj} (
1248      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
1249      *         )
1250      *
1251      * Supported tensor {@link OperandType}:
1252      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1253      * * {@link OperandType::TENSOR_FLOAT32}
1254      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1255      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1256      *
1257      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1258      * With the default data layout NHWC, the data is stored in the order of:
1259      * [batch, height, width, channels]. Alternatively, the data layout could
1260      * be NCHW, the data storage order of: [batch, channels, height, width].
1261      * NCHW is supported since HAL version 1.2.
1262      *
1263      * Both explicit padding and implicit padding are supported.
1264      *
1265      * Inputs (explicit padding):
1266      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1267      *      the input.
1268      *      Since HAL version 1.2, zero batches is supported for this tensor.
1269      * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
1270      *      the left, in the ‘width’ dimension.
1271      * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
1272      *      the right, in the ‘width’ dimension.
1273      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
1274      *      the top, in the ‘height’ dimension.
1275      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
1276      *      the bottom, in the ‘height’ dimension.
1277      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
1278      *      walking through input in the ‘width’ dimension.
1279      * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
1280      *      walking through input in the ‘height’ dimension.
1281      * * 7: An {@link OperandType::INT32} scalar, specifying the filter
1282      *      width.
1283      * * 8: An {@link OperandType::INT32} scalar, specifying the filter
1284      *      height.
1285      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
1286      *      {@link FusedActivationFunc} values. Specifies the activation to
1287      *      invoke on the result.
1288      * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
1289      *       Set to true to specify NCHW data layout for input0 and output0.
1290      *       Available since HAL version 1.2.
1291      *
1292      * Inputs (implicit padding):
1293      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1294      *      the input.
1295      *      Since HAL version 1.2, zero batches is supported for this tensor.
1296      * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
1297      *      padding scheme, has to be one of the
1298      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
1299      * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
1300      *      walking through input in the ‘width’ dimension.
1301      * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
1302      *      walking through input in the ‘height’ dimension.
1303      * * 4: An {@link OperandType::INT32} scalar, specifying the filter
1304      *      width.
1305      * * 5: An {@link OperandType::INT32} scalar, specifying the filter
1306      *      height.
1307      * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
1308      *      {@link FusedActivationFunc} values. Specifies the activation to
1309      *      invoke on the result.
1310      * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
1311      *      Set to true to specify NCHW data layout for input0 and output0.
1312      *      Available since HAL version 1.2.
1313      *
1314      * Outputs:
1315      * * 0: The output 4-D tensor, of shape
1316      *      [batches, out_height, out_width, depth].
1317      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1318      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1319      *      the scale and zeroPoint must be the same as input0.
1320      */
1321     MAX_POOL_2D = 17,
1322 
1323     /**
1324      * Multiplies two tensors, element-wise.
1325      *
1326      * Takes two input tensors of identical {@link OperandType} and compatible
1327      * dimensions. The output is the product of both input tensors, optionally
1328      * modified by an activation function.
1329      *
1330      * Two dimensions are compatible when:
1331      *     1. they are equal, or
1332      *     2. one of them is 1
1333      *
1334      * The size of the resulting output is the maximum size along each dimension
1335      * of the input operands. It starts with the trailing dimensions, and works
1336      * its way forward.
1337      *
1338      * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
1339      * dimension is only compatible with 0 or 1. The size of the output
1340      * dimension is zero if either of corresponding input dimension is zero.
1341      *
1342      * Supported tensor {@link OperandType}:
1343      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1344      * * {@link OperandType::TENSOR_FLOAT32}
1345      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1346      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1347      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
1348      *
1349      * Supported tensor rank: up to 4
1350      *
1351      * Inputs:
1352      * * 0: A tensor.
1353      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
1354      *      as input0.
1355      * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
1356      *      {@link FusedActivationFunc} values. Specifies the activation to
1357      *      invoke on the result.
1358      *      For a {@link OperandType::TENSOR_INT32} tensor,
1359      *      the {@link FusedActivationFunc} must be "NONE".
1360      *
1361      * Outputs:
1362      * * 0: The product, a tensor of the same {@link OperandType} as input0.
1363      *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
1364      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
1365      *      the following condition must be satisfied:
1366      *      output_scale > input1_scale * input2_scale.
1367      */
1368     MUL = 18,
1369 
1370     /**
1371      * Computes rectified linear activation on the input tensor element-wise.
1372      *
1373      * The output is calculated using this formula:
1374      *
1375      *     output = max(0, input)
1376      *
1377      * Supported tensor {@link OperandType}:
1378      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1379      * * {@link OperandType::TENSOR_FLOAT32}
1380      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1381      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1382      *
1383      * Supported tensor rank: up to 4.
1384      *
1385      * Inputs:
1386      * * 0: A tensor, specifying the input.
1387      *      Since HAL version 1.2, this tensor may be zero-sized.
1388      *
1389      * Outputs:
1390      * * 0: The output tensor of same shape as input0.
1391      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1392      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1393      *      the scale and zeroPoint must be the same as input0.
1394      */
1395     RELU = 19,
1396 
1397     /**
1398      * Computes rectified linear 1 activation on the input tensor element-wise.
1399      *
1400      * The output is calculated using this formula:
1401      *
1402      *     output = min(1.f, max(-1.f, input))
1403      *
1404      * Supported tensor {@link OperandType}:
1405      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1406      * * {@link OperandType::TENSOR_FLOAT32}
1407      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1408      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1409      *
1410      * Supported tensor rank: up to 4.
1411      *
1412      * Inputs:
1413      * * 0: A tensor, specifying the input.
1414      *      Since HAL version 1.2, this tensor may be zero-sized.
1415      *
1416      * Outputs:
1417      * * 0: The output tensor of the same shape as input0.
1418      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1419      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1420      *      the scale and zeroPoint must be the same as input0.
1421      */
1422     RELU1 = 20,
1423 
1424     /**
1425      * Computes rectified linear 6 activation on the input tensor element-wise.
1426      *
1427      * The output is calculated using this formula:
1428      *
1429      *     output = min(6, max(0, input))
1430      *
1431      * Supported tensor {@link OperandType}:
1432      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1433      * * {@link OperandType::TENSOR_FLOAT32}
1434      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1435      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1436      *
1437      * Supported tensor rank: up to 4.
1438      *
1439      * Inputs:
1440      * * 0: A tensor, specifying the input.
1441      *      Since HAL version 1.2, this tensor may be zero-sized.
1442      *
1443      * Outputs:
1444      * * 0: The output tensor of same shape as input0.
1445      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1446      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1447      *      the scale and zeroPoint must be the same as input0.
1448      */
1449     RELU6 = 21,
1450 
1451     /**
1452      * Reshapes a tensor.
1453      *
1454      * Given tensor, this operation returns a tensor that has the same values as
1455      * tensor, but with a newly specified shape.
1456      *
1457      * Supported tensor {@link OperandType}:
1458      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1459      * * {@link OperandType::TENSOR_FLOAT32}
1460      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1461      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1462      *
1463      * Supported tensor rank: up to 4.
1464      *
1465      * Inputs:
1466      * * 0: A tensor, specifying the tensor to be reshaped.
1467      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}, defining the
1468      *      shape of the output tensor. The number of elements implied by shape
1469      *      must be the same as the number of elements in the input tensor.
1470      *
1471      *      If one component of shape is the special value -1, the size of that
1472      *      dimension is computed so that the total size remains constant. In
1473      *      particular, a shape of [-1] flattens into 1-D. At most one component
1474      *      of shape can be -1.
1475      *
1476      * Outputs:
1477      * * 0: The output tensor, of shape specified by the input shape.
1478      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1479      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1480      *      the scale and zeroPoint must be the same as input0.
1481      */
1482     RESHAPE = 22,
1483 
1484     /**
1485      * Resizes images to given size using the bilinear interpretation.
1486      *
1487      * Resized images must be distorted if their output aspect ratio is not the
1488      * same as input aspect ratio. The corner pixels of output may not be the
1489      * same as corner pixels of input.
1490      *
1491      * Supported tensor {@link OperandType}:
1492      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1493      * * {@link OperandType::TENSOR_FLOAT32}
1494      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
1495      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1496      *
1497      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1498      * With the default data layout NHWC, the data is stored in the order of:
1499      * [batch, height, width, channels]. Alternatively, the data layout could
1500      * be NCHW, the data storage order of: [batch, channels, height, width].
1501      * NCHW is supported since HAL version 1.2.
1502      *
1503      * Both resizing by shape and resizing by scale are supported.
1504      *
1505      * Inputs (resizing by shape):
1506      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1507      *      the input.
1508      *      Since HAL version 1.2, zero batches is supported for this tensor.
1509      * * 1: An {@link OperandType::INT32} scalar, specifying the output
1510      *      width of the output tensor.
1511      * * 2: An {@link OperandType::INT32} scalar, specifying the output
1512      *      height of the output tensor.
1513      * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
1514      *      Set to true to specify NCHW data layout for input0 and output0.
1515      *      Available since HAL version 1.2.
1516      * * 4: Align corners. An optional {@link OperandType::BOOL}
1517      *      scalar, default to false.  If True, the centers of the 4 corner
1518      *      pixels of the input and output tensors are aligned, preserving the
1519      *      values at the corner pixels.
1520      *      Available since HAL version 1.3.
1521      * * 5: Half pixel centers. An optional {@link OperandType::BOOL}
1522      *      scalar, default to false. If True, the pixel centers are assumed to
1523      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1524      *      TF 2.0. If this parameter is True, then align_corners parameter
1525      *      must be False.
1526      *      Available since HAL version 1.3.
1527      *
1528      * Inputs (resizing by scale, since HAL version 1.2):
1529      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1530      *      the input. Zero batches is supported for this tensor.
1531      * * 1: A scalar, specifying width_scale, the scaling factor of the width
1532      *      dimension from the input tensor to the output tensor. The output
1533      *      width is calculated as new_width = floor(width * width_scale).
1534      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
1535      *      of {@link OperandType::TENSOR_FLOAT16} and of
1536      *      {@link OperandType::FLOAT32} otherwise.
1537      * * 2: A scalar, specifying height_scale, the scaling factor of the height
1538      *      dimension from the input tensor to the output tensor. The output
1539      *      height is calculated as new_height = floor(height * height_scale).
1540      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
1541      *      of {@link OperandType::TENSOR_FLOAT16} and of
1542      *      {@link OperandType::FLOAT32} otherwise.
1543      * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
1544      *      Set to true to specify NCHW data layout for input0 and output0.
1545      * * 4: Align corners. An optional {@link OperandType::BOOL}
1546      *      scalar, default to false.  If True, the centers of the 4 corner
1547      *      pixels of the input and output tensors are aligned, preserving the
1548      *      values at the corner pixels.
1549      *      Available since HAL version 1.3.
1550      * * 5: Half pixel centers. An optional {@link OperandType::BOOL}
1551      *      scalar, default to false. If True, the pixel centers are assumed to
1552      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1553      *      TF 2.0. If this parameter is True, then align_corners parameter
1554      *      must be False.
1555      *      Available since HAL version 1.3.
1556      *
1557      * Outputs:
1558      * * 0: The output 4-D tensor, of shape
1559      *      [batches, new_height, new_width, depth].
1560      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1561      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1562      *      the scale and zeroPoint must be the same as input0.
1563      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
1564      *      the scale and zeroPoint must be the same as input0.
1565      */
1566     RESIZE_BILINEAR = 23,
1567 
1568     /**
1569      * A basic recurrent neural network layer.
1570      *
1571      * This layer implements the operation:
1572      * outputs = state = activation(inputs * input_weights +
1573      *                              state * recurrent_weights + bias)
1574      *
1575      * Where:
1576      * * “input_weights” is a weight matrix that multiplies the inputs;
1577      * * “recurrent_weights” is a weight matrix that multiplies the current
1578      *    “state” which itself is the output from the previous time step
1579      *    computation;
1580      * * “bias” is a bias vector (added to each output vector in the batch);
1581      * * “activation” is the function passed as the “fused_activation_function”
1582      *   argument (if not “NONE”).
1583      *
1584      * Supported tensor {@link OperandType}:
1585      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1586      * * {@link OperandType::TENSOR_FLOAT32}
1587      *
1588      * The input tensors must all be the same type.
1589      *
1590      * Inputs:
1591      * * 0: input.
1592      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1593      *      corresponds to the batching dimension, and “input_size” is the size
1594      *      of the input.
1595      * * 1: weights.
1596      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1597      *      corresponds to the number of units.
1598      * * 2: recurrent_weights.
1599      *      A 2-D tensor of shape [num_units, num_units], with columns
1600      *      corresponding to the weights from each unit.
1601      * * 3: bias.
1602      *      A 1-D tensor of shape [num_units].
1603      * * 4: hidden state (in).
1604      *      A 2-D tensor of shape [batch_size, num_units].
1605      * * 5: fused_activation_function.
1606      *      An optional {@link FusedActivationFunc} value indicating the
1607      *      activation function. If “NONE” is specified then it results in a
1608      *      linear activation.
1609      *
1610      * Outputs:
1611      * * 0: hidden state (out).
1612      *      A 2-D tensor of shape [batch_size, num_units].
1613      *
1614      * * 1: output.
1615      *      A 2-D tensor of shape [batch_size, num_units]. This is effectively
1616      *      the same as the current state value.
1617      */
1618     RNN = 24,
1619 
1620     /**
1621      * Computes the softmax activation on the input tensor element-wise, per
1622      * batch, by normalizing the input vector so the maximum coefficient is
1623      * zero.
1624      *
1625      * The output is calculated using this formula:
1626      *
1627      *     output[batch, i] =
1628      *         exp((input[batch, i] - max(input[batch, :])) * beta) /
1629      *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1630      *
1631      * For input tensor with rank other than 2, the activation will be applied
1632      * independently on each 1-D slice along specified dimension.
1633      *
1634      * Supported tensor {@link OperandType}:
1635      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1636      * * {@link OperandType::TENSOR_FLOAT32}
1637      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1638      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1639      *
1640      * Supported tensor rank: up to 4.
1641      * Tensors with rank other than 2 or 4 are only supported since HAL version 1.2.
1642      *
1643      * Inputs:
1644      * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
1645      *      Since HAL version 1.2, this tensor may be zero-sized.
1646      * * 1: A scalar, specifying the positive scaling factor for the exponent,
1647      *      beta. If input0 is of {@link OperandType::TENSOR_FLOAT32},
1648      *      {@link OperandType::TENSOR_QUANT8_ASYMM} or
1649      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, the scalar
1650      *      must be of {@link OperandType::FLOAT32}.
1651      *      If input0 is of {@link OperandType::TENSOR_FLOAT16}, then the
1652      *      scalar must be of {@link OperandType::FLOAT16}.
1653      * * 2: An optional {@link OperandType::INT32} scalar, default to -1,
1654      *      specifying the dimension the activation would be performed on.
1655      *      Negative index is used to specify axis from the end (e.g. -1 for
1656      *      the last axis). Must be in the range [-n, n).
1657      *      Available since HAL version 1.2.
1658      *
1659      * Outputs:
1660      * * 0: The output tensor of same shape as input0.
1661      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
1662      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1663      *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
1664      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1665      */
1666     SOFTMAX = 25,
1667 
1668     /**
1669      * Rearranges blocks of spatial data, into depth.
1670      *
1671      * More specifically, this op outputs a copy of the input tensor where
1672      * values from the height and width dimensions are moved to the depth
1673      * dimension. The value block_size indicates the input block size and how
1674      * the data is moved.
1675      *
1676      * Chunks of data of size block_size * block_size from depth are rearranged
1677      * into non-overlapping blocks of size block_size x block_size.
1678      *
1679      * The depth of the output tensor is input_depth * block_size * block_size.
1680      * The input tensor's height and width must be divisible by block_size.
1681      *
1682      * Supported tensor {@link OperandType}:
1683      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1684      * * {@link OperandType::TENSOR_FLOAT32}
1685      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1686      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1687      *
1688      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1689      * With the default data layout NHWC, the data is stored in the order of:
1690      * [batch, height, width, channels]. Alternatively, the data layout could
1691      * be NCHW, the data storage order of: [batch, channels, height, width].
1692      * NCHW is supported since HAL version 1.2.
1693      *
1694      * Inputs:
1695      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1696      *      specifying the input.
1697      * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
1698      *      block_size must be >=1 and block_size must be a divisor of both the
1699      *      input height and width.
1700      * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
1701      *      Set to true to specify NCHW data layout for input0 and output0.
1702      *      Available since HAL version 1.2.
1703      *
1704      * Outputs:
1705      * * 0: The output 4-D tensor, of shape [batches, height/block_size,
1706      *      width/block_size, depth_in*block_size*block_size].
1707      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1708      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1709      *      the scale and zeroPoint must be the same as input0.
1710      */
1711     SPACE_TO_DEPTH = 26,
1712 
1713     /**
1714      * SVDF op is a kind of stateful layer derived from the notion that a
1715      * densely connected layer that's processing a sequence of input frames can
1716      * be approximated by using a singular value decomposition of each of its
1717      * nodes. The implementation is based on:
1718      *
1719      * https://research.google.com/pubs/archive/43813.pdf
1720      *
1721      * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
1722      * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
1723      * INTERSPEECH, 2015.
1724      *
1725      * It processes the incoming input using a 2-stage filtering mechanism:
1726      * * stage 1 performs filtering on the "features" dimension, whose outputs
1727      *   get pushed into a memory of fixed-size memory_size.
1728      * * stage 2 performs filtering on the "time" dimension of the memory_size
1729      *   memoized outputs of stage 1.
1730      *
1731      * Specifically, for rank 1, this layer implements the operation:
1732      *
1733      *     memory = push(conv1d(inputs, weights_feature, feature_dim,
1734      *                          "PADDING_VALID"));
1735      *     outputs = activation(memory * weights_time + bias);
1736      *
1737      * Where:
1738      * * “weights_feature” is a weights matrix that processes the inputs (by
1739      *   convolving the input with every “feature filter”), and whose outputs
1740      *   get pushed, stacked in order, into the fixed-size “memory” (the oldest
1741      *   entry gets dropped);
1742      * * “weights_time” is a weights matrix that processes the “memory” (by a
1743      *   batched matrix multiplication on the num_units);
1744      * * “bias” is an optional bias vector (added to each output vector in the
1745      *   batch); and
1746      * * “activation” is the function passed as the “fused_activation_function”
1747      *   argument (if not “NONE”).
1748      *
1749      * Each rank adds a dimension to the weights matrices by means of stacking
1750      * the filters.
1751      *
1752      * Supported tensor {@link OperandType}:
1753      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1754      * * {@link OperandType::TENSOR_FLOAT32}
1755      *
1756      * All input tensors must be the same type.
1757      *
1758      * Inputs:
1759      * * 0: input.
1760      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1761      *      corresponds to the batching dimension, and “input_size” is the size
1762      *      of the input.
1763      * * 1: weights_feature.
1764      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1765      *      corresponds to the number of units.
1766      * * 2: weights_time.
1767      *      A 2-D tensor of shape [num_units, memory_size], where “memory_size”
1768      *      corresponds to the fixed-size of the memory.
1769      * * 3: bias.
1770      *      An optional 1-D tensor of shape [num_units].
1771      * * 4: state (in).
1772      *      A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
1773      * * 5: rank.
1774      *      The rank of the SVD approximation.
1775      * * 6: fused_activation_function.
1776      *      An optional {@link FusedActivationFunc} value indicating the
1777      *      activation function. If “NONE” is specified then it results in a
1778      *      linear activation.
1779      *
1780      * Outputs:
1781      * * 0: state (out).
1782      *      A 2-D tensor of the same {@link OperandType} as the inputs, with shape
1783      *      [batch_size, (memory_size - 1) * num_units * rank].
1784      * * 1: output.
1785      *      A 2-D tensor of the same {@link OperandType} as the inputs, with shape
1786      *      [batch_size, num_units].
1787      */
1788     SVDF = 27,
1789 
1790     /**
1791      * Computes hyperbolic tangent of input tensor element-wise.
1792      *
1793      * The output is calculated using this formula:
1794      *
1795      *     output = tanh(input)
1796      *
1797      * Supported tensor {@link OperandType}:
1798      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1799      * * {@link OperandType::TENSOR_FLOAT32}
1800      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
1801      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1802      *
1803      * Supported tensor rank: up to 4.
1804      *
1805      * Inputs:
1806      * * 0: A tensor, specifying the input.
1807      *      Since HAL version 1.2, this tensor may be zero-sized.
1808      *
1809      * Outputs:
1810      * * 0: The output tensor of same shape as input0.
1811      *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
1812      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
1813      *      For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
1814      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
1815      */
1816     TANH = 28,
1817 
1818     /**
1819      * BatchToSpace for N-dimensional tensors.
1820      *
1821      * This operation reshapes the batch dimension (dimension 0) into M + 1
1822      * dimensions of shape block_shape + [batch], interleaves these blocks back
1823      * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
1824      * result with the same rank as the input.
1825      *
1826      * This is the reverse of SpaceToBatch.
1827      *
1828      * Supported tensor {@link OperandType}:
1829      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1830      * * {@link OperandType::TENSOR_FLOAT32}
1831      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1832      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1833      *
1834      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1835      * With the default data layout NHWC, the data is stored in the order of:
1836      * [batch, height, width, channels]. Alternatively, the data layout could
1837      * be NCHW, the data storage order of: [batch, channels, height, width].
1838      * NCHW is supported since HAL version 1.2.
1839      *
1840      * Inputs:
1841      * * 0: An n-D tensor, specifying the tensor to be reshaped
1842      * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
1843      *      sizes for each spatial dimension of the input tensor. All values
1844      *      must be >= 1.
1845      * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
1846      *      Set to true to specify NCHW data layout for input0 and output0.
1847      *      Available since API level 29.
1848      *
1849      * Outputs:
1850      * * 0: A tensor of the same {@link OperandType} as input0.
1851      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1852      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1853      *      the scale and zeroPoint must be the same as input0.
1854      */
1855     BATCH_TO_SPACE_ND = 29,
1856 
1857     /**
1858      * Element-wise division of two tensors.
1859      *
1860      * Takes two input tensors of identical {@link OperandType} and compatible
1861      * dimensions. The output is the result of dividing the first input tensor
1862      * by the second, optionally modified by an activation function.
1863      *
1864      * For inputs of {@link OperandType::TENSOR_INT32}, performs
1865      * "floor division" ("//" in Python). For example,
1866      *     5 // 2 = 2
1867      *    -5 // 2 = -3
1868      *
1869      * Two dimensions are compatible when:
1870      *     1. they are equal, or
1871      *     2. one of them is 1
1872      *
1873      * The size of the output is the maximum size along each dimension of the
1874      * input operands. It starts with the trailing dimensions, and works its way
1875      * forward.
1876      *
1877      * Example:
1878      *     input1.dimension =    {4, 1, 2}
1879      *     input2.dimension = {5, 4, 3, 1}
1880      *     output.dimension = {5, 4, 3, 2}
1881      *
1882      * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
1883      * dimension is only compatible with 0 or 1. The size of the output
1884      * dimension is zero if either of corresponding input dimension is zero.
1885      *
1886      * Supported tensor {@link OperandType}:
1887      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1888      * * {@link OperandType::TENSOR_FLOAT32}
1889      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
1890      *
1891      * Supported tensor rank: up to 4
1892      *
1893      * Inputs:
1894      * * 0: An n-D tensor, specifying the first input.
1895      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
1896      *      as input0.
1897      * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
1898      *      {@link FusedActivationFunc} values. Specifies the activation to
1899      *      invoke on the result.
1900      *      For a {@link OperandType::TENSOR_INT32} tensor,
1901      *      the {@link FusedActivationFunc} must be "NONE".
1902      *
1903      * Outputs:
1904      * * 0: A tensor of the same {@link OperandType} as input0.
1905      */
1906     DIV = 30,
1907 
1908     /**
1909      * Computes the mean of elements across dimensions of a tensor.
1910      *
1911      * Reduces the input tensor along the given dimensions to reduce. Unless
1912      * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
1913      * in axis. If keep_dims is true, the reduced dimensions are retained with
1914      * length 1.
1915      *
1916      * Supported tensor {@link OperandType}:
1917      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1918      * * {@link OperandType::TENSOR_FLOAT32}
1919      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1920      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1921      *
1922      * Supported tensor rank: up to 4
1923      *
1924      * Inputs:
1925      * * 0: A tensor, specifying the input.
1926      * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}. The dimensions
1927      *      to reduce. Must be in the range
1928      *      [-rank(input_tensor), rank(input_tensor)).
1929      *
1930      *      NOTE: When the operation was introduced, the documentation
1931      *      incorrectly stated that if dimensions were empty, the operation
1932      *      would reduce across all dimensions. This behavior was never
1933      *      implemented.
1934      *
1935      * * 2: An {@link OperandType::INT32} scalar, keep_dims. If positive,
1936      *      retains reduced dimensions with length 1.
1937      *
1938      * Outputs:
1939      * * 0: A tensor of the same {@link OperandType} as input0.
1940      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1941      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1942      *      the scale and zeroPoint must be the same as input0.
1943      *      If all dimensions are reduced and keep_dims is false, the output
1944      *      shape is [1].
1945      */
1946     MEAN = 31,
1947 
1948     /**
1949      * Pads a tensor.
1950      *
1951      * This operation pads a tensor according to the specified paddings.
1952      *
1953      * Supported tensor {@link OperandType}:
1954      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
1955      * * {@link OperandType::TENSOR_FLOAT32}
1956      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1957      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
1958      *   (full support since HAL version 1.2, see the output section)
1959      *
1960      * Supported tensor rank: up to 4
1961      *
1962      * Inputs:
1963      * * 0: An n-D tensor, specifying the tensor to be padded.
1964      * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
1965      *      for each spatial dimension of the input tensor. The shape of the
1966      *      tensor must be {rank(input0), 2}.
1967      *      padding[i, 0] specifies the number of elements to be padded in the
1968      *      front of dimension i.
1969      *      padding[i, 1] specifies the number of elements to be padded after the
1970      *      end of dimension i.
1971      *
1972      * Outputs:
1973      * * 0: A tensor of the same {@link OperandType} as input0. The
1974      *      output tensor has the same rank as input0, and each
1975      *      dimension of the output tensor has the same size as the
1976      *      corresponding dimension of the input tensor plus the size
1977      *      of the padding:
1978      *          output0.dimension[i] =
1979      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
1980      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
1981      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1982      *      the scale and zeroPoint must be the same as input0.
1983      *
1984      *      NOTE: Before HAL version 1.2, the pad value for
1985      *      {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
1986      *      Since HAL version 1.2, the pad value is always the logical zero.
1987      */
1988     PAD = 32,
1989 
1990     /**
1991      * SpaceToBatch for N-Dimensional tensors.
1992      *
1993      * This operation divides "spatial" dimensions [1, ..., M] of the input into
1994      * a grid of blocks of shape block_shape, and interleaves these blocks with
1995      * the "batch" dimension (0) such that in the output, the spatial dimensions
1996      * [1, ..., M] correspond to the position within the grid, and the batch
1997      * dimension combines both the position within a spatial block and the
1998      * original batch position. Prior to division into blocks, the spatial
1999      * dimensions of the input are optionally zero padded according to paddings.
2000      *
2001      * Supported tensor {@link OperandType}:
2002      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2003      * * {@link OperandType::TENSOR_FLOAT32}
2004      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2005      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2006      *   (full support since HAL version 1.2, see the output section)
2007      *
2008      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2009      * With the default data layout NHWC, the data is stored in the order of:
2010      * [batch, height, width, channels]. Alternatively, the data layout could
2011      * be NCHW, the data storage order of: [batch, channels, height, width].
2012      * NCHW is supported since HAL version 1.2.
2013      *
2014      * Inputs:
2015      * * 0: An n-D tensor, specifying the input.
2016      * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
2017      *      sizes for each spatial dimension of the input tensor. All values
2018      *      must be >= 1.
2019      * * 2: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
2020      *      for each spatial dimension of the input tensor. All values must be
2021      *      >= 0. The shape of the tensor must be {M, 2}, where M is the number
2022      *      of spatial dimensions.
2023      *      padding[i, 0] specifies the number of element to be padded in the
2024      *      front of dimension i.
2025      *      padding[i, 1] specifies the number of element to be padded after the
2026      *      end of dimension i.
2027      * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
2028      *      Set to true to specify NCHW data layout for input0 and output0.
2029      *      Available since HAL version 1.2.
2030      *
2031      * Outputs:
2032      * * 0: A tensor of the same {@link OperandType} as input0.
2033      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2034      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2035      *      the scale and zeroPoint must be the same as input0.
2036      *
2037      *      NOTE: Before HAL version 1.2, the pad value for
2038      *      {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
2039      *      Since HAL version 1.2, the pad value is always the logical zero.
2040      */
2041     SPACE_TO_BATCH_ND = 33,
2042 
2043     /**
2044      * Removes dimensions of size 1 from the shape of a tensor.
2045      *
2046      * Given a tensor input, this operation returns a tensor of the same
2047      * {@link OperandType} with all dimensions of size 1 removed. If you don't
2048      * want to remove all size 1 dimensions, you can remove specific size 1
2049      * dimensions by specifying the axes (input1).
2050      *
2051      * Supported tensor {@link OperandType}:
2052      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2053      * * {@link OperandType::TENSOR_FLOAT32}
2054      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2055      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2056      *
2057      * Supported tensor rank: up to 4
2058      *
2059      * Inputs:
2060      * * 0: An n-D tensor, the tensor to be squeezed.
2061      * * 1: An optional 1-D tensor of {@link OperandType::TENSOR_INT32}. The
2062      *      dimensions to squeeze. If specified only squeezes the dimensions
2063      *      listed. Otherwise, squeezes all dimensions. The dimension index
2064      *      starts at 0. An error must be reported if squeezing a dimension that
2065      *      is not 1.
2066      *
2067      * Outputs:
2068      * * 0: A tensor of the same {@link OperandType} as input0. Contains the
2069      *      same data as input, but has one or more dimensions of size 1
2070      *      removed.
2071      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2072      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2073      *      the scale and zeroPoint must be the same as input0.
2074      *      If all input dimensions are equal to 1 and are to be squeezed, the
2075      *      output shape is [1].
2076      */
2077     SQUEEZE = 34,
2078 
2079     /**
2080      * Extracts a strided slice of a tensor.
2081      *
2082      * Roughly speaking, this op extracts a slice of size (end - begin) / stride
2083      * from the given input tensor. Starting at the location specified by begin
2084      * the slice continues by adding stride to the index until all dimensions
2085      * are not less than end. Note that a stride can be negative, which causes a
2086      * reverse slice.
2087      *
2088      * Supported tensor {@link OperandType}:
2089      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2090      * * {@link OperandType::TENSOR_FLOAT32}
2091      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2092      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2093      *
2094      * Supported tensor rank: up to 4
2095      *
2096      * Inputs:
2097      * * 0: An n-D tensor, specifying the tensor to be sliced.
2098      * * 1: begin, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
2099      *      starts of the dimensions of the input tensor to be sliced. The
2100      *      length must be of rank(input0).
2101      * * 2: end, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
2102      *      ends of the dimensions of the input tensor to be sliced. The length
2103      *      must be of rank(input0).
2104      * * 3: strides, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
2105      *      strides of the dimensions of the input tensor to be sliced. The
2106      *      length must be of rank(input0). The entries must be non-zero.
2107      * * 4: begin_mask, an {@link OperandType::INT32} scalar. If the ith bit
2108      *      of begin_mask is set, begin[i] is ignored and the fullest possible
2109      *      range in that dimension is used instead.
2110      * * 5: end_mask, an {@link OperandType::INT32} scalar. If the ith bit of
2111      *      end_mask is set, end[i] is ignored and the fullest possible range in
2112      *      that dimension is used instead.
2113      * * 6: shrink_axis_mask, an {@link OperandType::INT32} scalar. If the
2114      *      ith bit of shrink_axis_mask is set, the ith dimension specification
2115      *      shrinks the dimensionality by 1, taking on the value at index
2116      *      begin[i]. In this case, the ith specification must define a
2117      *      slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
2118      *
2119      * Outputs:
2120      * * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k),
2121      *      where k is the number of bits set in shrink_axis_mask.
2122      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2123      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2124      *      the scale and zeroPoint must be the same as input0.
2125      *      If shrink_axis_mask is true for all input dimensions, the output
2126      *      shape is [1].
2127      */
2128     STRIDED_SLICE = 35,
2129 
2130     /**
2131      * Element-wise subtraction of two tensors.
2132      *
2133      * Takes two input tensors of identical {@link OperandType} and compatible
2134      * dimensions. The output is the result of subtracting the second input
2135      * tensor from the first one, optionally modified by an activation function.
2136      *
2137      * Two dimensions are compatible when:
2138      *     1. they are equal, or
2139      *     2. one of them is 1
2140      *
2141      * The size of the output is the maximum size along each dimension of the
2142      * input operands. It starts with the trailing dimensions, and works its way
2143      * forward.
2144      *
2145      * Example:
2146      *     input1.dimension =    {4, 1, 2}
2147      *     input2.dimension = {5, 4, 3, 1}
2148      *     output.dimension = {5, 4, 3, 2}
2149      *
2150      * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
2151      * dimension is only compatible with 0 or 1. The size of the output
2152      * dimension is zero if either of corresponding input dimension is zero.
2153      *
2154      * Supported tensor {@link OperandType}:
2155      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2156      * * {@link OperandType::TENSOR_FLOAT32}
2157      * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
2158      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2159      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
2160      *
2161      * Supported tensor rank: up to 4
2162      *
2163      * Inputs:
2164      * * 0: An n-D tensor, specifying the first input.
2165      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
2166      *      as input0.
2167      * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
2168      *      {@link FusedActivationFunc} values. Specifies the activation to
2169      *      invoke on the result.
2170      *      For a {@link OperandType::TENSOR_INT32} tensor,
2171      *      the {@link FusedActivationFunc} must be "NONE".
2172      *
2173      * Outputs:
2174      * * 0: A tensor of the same {@link OperandType} as input0.
2175      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2176      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2177      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
2178      */
2179     SUB = 36,
2180 
2181     /**
2182      * Transposes the input tensor, permuting the dimensions according to the
2183      * perm tensor.
2184      *
2185      * The returned tensor's dimension i corresponds to the input dimension
2186      * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
2187      * rank of the input tensor. Hence by default, this operation performs a
2188      * regular matrix transpose on 2-D input Tensors.
2189      *
2190      * Supported tensor {@link OperandType}:
2191      * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
2192      * * {@link OperandType::TENSOR_FLOAT32}
2193      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2194      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2195      *
2196      * Supported tensor rank: up to 4
2197      *
2198      * Inputs:
2199      * * 0: An n-D tensor, specifying the tensor to be transposed.
2200      *      Since HAL version 1.2, this tensor may be zero-sized.
2201      * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
2202      *      the permutation of the dimensions of the input tensor.
2203      *
2204      * Outputs:
2205      * * 0: A tensor of the same {@link OperandType} as input0.
2206      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2207      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2208      *      the scale and zeroPoint must be the same as input0.
2209      */
2210     TRANSPOSE = 37,
2211 
2212     /**
2213      * Computes the absolute value of a tensor, element-wise.
2214      *
2215      * Supported tensor {@link OperandType}:
2216      * * {@link OperandType::TENSOR_FLOAT16}
2217      * * {@link OperandType::TENSOR_FLOAT32}
2218      * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3)
2219      *
2220      * Supported tensor rank: from 1.
2221      *
2222      * Inputs:
2223      * * 0: A tensor.
2224      *
2225      * Outputs:
2226      * * 0: The output tensor of same shape as input0.
2227      */
2228     ABS = 38,
2229 
2230     /**
2231      * Returns the index of the largest element along an axis.
2232      *
2233      * Supported tensor {@link OperandType}:
2234      * * {@link OperandType::TENSOR_FLOAT16}
2235      * * {@link OperandType::TENSOR_FLOAT32}
2236      * * {@link OperandType::TENSOR_INT32}
2237      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2238      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2239      *
2240      * Supported tensor rank: from 1
2241      *
2242      * Inputs:
2243      * * 0: An n-D tensor specifying the input. Must be non-empty.
2244      * * 1: An {@link OperandType::INT32} scalar specifying the axis to
2245      *      reduce across. Negative index is used to specify axis from the
2246      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2247      *
2248      * Outputs:
2249      * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
2250      *      If input is 1-dimensional, the output shape is [1].
2251      */
2252     // There is no underscore in ARG_MAX to avoid name conflict with
2253     // the macro defined in libc/kernel/uapi/linux/limits.h.
2254     ARGMAX = 39,
2255 
2256     /**
2257      * Returns the index of the smallest element along an axis.
2258      *
2259      * Supported tensor {@link OperandType}:
2260      * * {@link OperandType::TENSOR_FLOAT16}
2261      * * {@link OperandType::TENSOR_FLOAT32}
2262      * * {@link OperandType::TENSOR_INT32}
2263      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2264      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2265      *
2266      * Supported tensor rank: from 1
2267      *
2268      * Inputs:
2269      * * 0: An n-D tensor specifying the input. Must be non-empty.
2270      * * 1: An {@link OperandType::INT32} scalar specifying the axis to
2271      *      reduce across. Negative index is used to specify axis from the
2272      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2273      *
2274      * Outputs:
2275      * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
2276      *      If input is 1-dimensional, the output shape is [1].
2277      */
2278     ARGMIN = 40,  // See ARGMAX for naming discussion.
2279 
2280     /**
2281      * Transform axis-aligned bounding box proposals using bounding box deltas.
2282      *
2283      * Given the positions of bounding box proposals and the corresponding
2284      * bounding box deltas for each class, return the refined bounding box
2285      * regions. The resulting bounding boxes are cliped against the edges of
2286      * the image.
2287      *
2288      * Supported tensor {@link OperandType}:
2289      * * {@link OperandType::TENSOR_FLOAT16}
2290      * * {@link OperandType::TENSOR_FLOAT32}
2291      * * {@link OperandType::TENSOR_QUANT16_ASYMM}
2292      *
2293      * Inputs:
2294      * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
2295      *      bounding box proposals, each line with format [x1, y1, x2, y2].
2296      *      For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM},
2297      *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
2298      *      is supported for this tensor.
2299      * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
2300      *      bounding box delta for each region of interest and each class. The
2301      *      bounding box deltas are organized in the following order
2302      *      [dx, dy, dw, dh], where dx and dy is the relative correction factor
2303      *      for the center position of the bounding box with respect to the width
2304      *      and height, dw and dh is the log-scale relative correction factor
2305      *      for the width and height. For input0 of type
2306      *      {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
2307      *      of {@link OperandType::TENSOR_QUANT8_ASYMM} or
2308      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
2309      *      supported for this tensor.
2310      * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
2311      *      [num_rois], specifying the batch index of each box. Boxes with
2312      *      the same batch index are grouped together. Zero num_rois is
2313      *      supported for this tensor.
2314      * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
2315      *      each image in the batch, each line with format
2316      *      [image_height, image_width].
2317      *
2318      * Outputs:
2319      * * 0: A tensor of the same {@link OperandType} as input0, with shape
2320      *      [num_rois, num_classes * 4], specifying the coordinates of each
2321      *      output bounding box for each class, with format [x1, y1, x2, y2].
2322      *      For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
2323      *      scale must be 0.125 and the zero point must be 0.
2324      */
2325     AXIS_ALIGNED_BBOX_TRANSFORM = 41,
2326 
2327     /**
2328      * A recurrent neural network layer that applies an LSTM cell to a
2329      * sequence of inputs in forward and backward directions.
2330      *
2331      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2332      * one input into the two RNN cells in the following way:
2333      *
2334      *       INPUT  (INPUT_REVERSED)
2335      *         |         |
2336      *    ---------------------
2337      *    | FW_LSTM   BW_LSTM |
2338      *    ---------------------
2339      *         |         |
2340      *      FW_OUT     BW_OUT
2341      *
2342      * An op with cross-linking takes two inputs and feeds them into the RNN
2343      * cells in the following way:
2344      *
2345      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2346      *           |             |
2347      *     INPUT | (INPUT_R'D.)|
2348      *       |   |       |     |
2349      *    -----------------------
2350      *    |  \  /        \    / |
2351      *    | FW_LSTM     BW_LSTM |
2352      *    -----------------------
2353      *         |           |
2354      *      FW_OUT      BW_OUT
2355      *
2356      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2357      * weights are present. While stacking this op on top of itself, this
2358      * allows to connect both forward and backward outputs from previous cell
2359      * to the next cell's input.
2360      *
2361      * Since HAL version 1.3 parallel linking mode is supported. The mode is
2362      * enabled if auxiliary input is present but auxiliary weights are omitted.
2363      * In this case, the cell feeds inputs into the RNN in the following way:
2364      *
2365      *       INPUT (AUX_INPUT_REVERSED)
2366      *         |         |
2367      *    ---------------------
2368      *    | FW_LSTM   BW_LSTM |
2369      *    ---------------------
2370      *         |         |
2371      *      FW_OUT     BW_OUT
2372      *
2373      * While stacking this op on top of itself, this allows to connect both
2374      * forward and backward outputs from previous cell to the next cell's
2375      * corresponding inputs.
2376      *
2377      * Supported tensor {@link OperandType}:
2378      * * {@link OperandType::TENSOR_FLOAT16}
2379      * * {@link OperandType::TENSOR_FLOAT32}
2380      *
2381      * Supported tensor rank: 3, either time-major or batch-major.
2382      *
2383      * All input and output tensors must be of the same type.
2384      *
2385      * Inputs:
2386      * * 0: The input.
2387      *      A 3-D tensor of shape:
2388      *        If time-major: [max_time, batch_size, input_size]
2389      *        If batch-major: [batch_size, max_time, input_size]
2390      *      where "max_time" is the number of timesteps (sequence length),
2391      *      "batch_size" corresponds to the batching dimension, and
2392      *      "input_size" is the size of the input.
2393      * * 1: The forward input-to-input weights. Optional.
2394      *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
2395      *      corresponds to the number of forward cell units.
2396      * * 2: The forward input-to-forget weights.
2397      *      A 2-D tensor of shape [fw_num_units, input_size].
2398      * * 3: The forward input-to-cell weights.
2399      *      A 2-D tensor of shape [fw_num_units, input_size].
2400      * * 4: The forward input-to-output weights.
2401      *      A 2-D tensor of shape [fw_num_units, input_size].
2402      * * 5: The forward recurrent-to-input weights. Optional.
2403      *      A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
2404      *      corresponds to either the number of cell units (i.e., fw_num_units),
2405      *      or the second dimension of the “fw_projection_weights”, if defined.
2406      * * 6: The forward recurrent-to-forget weights.
2407      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2408      * * 7: The forward recurrent-to-cell weights.
2409      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2410      * * 8: The forward recurrent-to-output weights.
2411      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2412      * * 9: The forward cell-to-input weights. Optional.
2413      *      A 1-D tensor of shape [fw_num_units].
2414      * * 10: The forward cell-to-forget weights. Optional.
2415      *       A 1-D tensor of shape [fw_num_units].
2416      * * 11: The forward cell-to-output weights. Optional.
2417      *       A 1-D tensor of shape [fw_num_units].
2418      * * 12: The forward input gate bias. Optional.
2419      *       A 1-D tensor of shape [fw_num_units].
2420      * * 13: The forward forget gate bias.
2421      *       A 1-D tensor of shape [fw_num_units].
2422      * * 14: The forward cell gate bias.
2423      *       A 1-D tensor of shape [fw_num_units].
2424      * * 15: The forward output gate bias.
2425      *       A 1-D tensor of shape [fw_num_units].
2426      * * 16: The forward projection weights. Optional.
2427      *       A 2-D tensor of shape [fw_output_size, fw_num_units].
2428      * * 17: The forward projection bias. Optional.
2429      *       A 1-D tensor of shape [fw_output_size].
2430      * * 18: The backward input-to-input weights. Optional.
2431      *       A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
2432      *       corresponds to the number of backward cell units.
2433      * * 19: The backward input-to-forget weights.
2434      *       A 2-D tensor of shape [bw_num_units, input_size].
2435      * * 20: The backward input-to-cell weights.
2436      *       A 2-D tensor of shape [bw_num_units, input_size].
2437      * * 21: The backward input-to-output weights.
2438      *       A 2-D tensor of shape [bw_num_units, input_size].
2439      * * 22: The backward recurrent-to-input weights. Optional.
2440      *       A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
2441      *       corresponds to either the number of cell units (i.e., “bw_num_units”),
2442      *       or the second dimension of the “bw_projection_weights”, if defined.
2443      * * 23: The backward recurrent-to-forget weights.
2444      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2445      * * 24: The backward recurrent-to-cell weights.
2446      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2447      * * 25: The backward recurrent-to-output weights.
2448      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2449      * * 26: The backward cell-to-input weights. Optional.
2450      *       A 1-D tensor of shape [bw_num_units].
2451      * * 27: The backward cell-to-forget weights. Optional.
2452      *       A 1-D tensor of shape [bw_num_units].
2453      * * 28: The backward cell-to-output weights. Optional.
2454      *       A 1-D tensor of shape [bw_num_units].
2455      * * 29: The backward input gate bias. Optional.
2456      *       A 1-D tensor of shape [bw_num_units].
2457      * * 30: The backward forget gate bias.
2458      *       A 1-D tensor of shape [bw_num_units].
2459      * * 31: The backward cell gate bias.
2460      *       A 1-D tensor of shape [bw_num_units].
2461      * * 32: The backward output gate bias.
2462      *       A 1-D tensor of shape [bw_num_units].
2463      * * 33: The backward projection weights. Optional.
2464      *       A 2-D tensor of shape [bw_output_size, bw_num_units].
2465      * * 34: The backward projection bias. Optional.
2466      *       A 1-D tensor of shape [bw_output_size].
2467      * * 35: The forward input activation state.
2468      *       A 2-D tensor of shape [batch_size, bw_output_size].
2469      * * 36: The forward input cell state.
2470      *       A 2-D tensor of shape [batch_size, bw_num_units].
2471      * * 37: The backward input activation state.
2472      *       A 2-D tensor of shape [batch_size, bw_output_size].
2473      * * 38: The backward input cell state.
2474      *       A 2-D tensor of shape [batch_size, bw_num_units].
2475      * * 39: The auxiliary input. Optional.
2476      *       A 3-D tensor of shape [max_time, batch_size, aux_input_size],
2477      *       where “batch_size” corresponds to the batching dimension, and
2478      *       “aux_input_size” is the size of the auxiliary input. Optional. See
2479      *       the docs above for the usage modes explanation.
2480      * * 40: The forward auxiliary input-to-input weights.
2481      *       Optional. See the docs above for the usage modes explanation.
2482      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2483      * * 41: The forward auxiliary input-to-forget weights.
2484      *       Optional. See the docs above for the usage modes explanation.
2485      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2486      * * 42: The forward auxiliary input-to-cell weights.
2487      *       Optional. See the docs above for the usage modes explanation.
2488      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2489      * * 43: The forward auxiliary input-to-output weights.
2490      *       Optional. See the docs above for the usage modes explanation.
2491      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2492      * * 44: The backward auxiliary input-to-input weights.
2493      *       Optional. See the docs above for the usage modes explanation.
2494      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2495      * * 45: The backward auxiliary input-to-forget weights.
2496      *       Optional. See the docs above for the usage modes explanation.
2497      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2498      * * 46: The backward auxiliary input-to-cell weights.
2499      *       Optional. See the docs above for the usage modes explanation.
2500      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2501      * * 47: The backward auxiliary input-to-output weights.
2502      *       Optional. See the docs above for the usage modes explanation.
2503      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2504      * * 48: The activation function.
2505      *       A value indicating the activation function:
2506      *       <ul>
2507      *       <li>0: None;
2508      *       <li>1: Relu;
2509      *       <li>3: Relu6;
2510      *       <li>4: Tanh;
2511      *       <li>6: Sigmoid.
2512      *       </ul>
2513      * * 49: The clipping threshold for the cell state, such
2514      *       that values are bound within [-cell_clip, cell_clip]. If set to 0.0
2515      *       then clipping is disabled.
2516      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
2517      *       this scalar must be of the type {@link OperandType::FLOAT32},
2518      *       otherwise if all the input tensors have the type
2519      *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
2520      *       of type {@link OperandType::FLOAT16}.
2521      * * 50: The clipping threshold for the output from the
2522      *       projection layer, such that values are bound within
2523      *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2524      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
2525      *       this scalar must be of the type {@link OperandType::FLOAT32},
2526      *       otherwise if all the input tensors have the type
2527      *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
2528      *       of type {@link OperandType::FLOAT16}.
2529      * * 51: merge_outputs
2530      *       An {@link OperandType::BOOL} scalar specifying if the outputs
2531      *       from forward and backward cells should be merged.
2532      * * 52: time_major
2533      *       An {@link OperandType::BOOL} scalar specifying the shape format
2534      *       of input and output tensors.
2535      * * 53: The forward input layer normalization weights. Optional.
2536      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2537      *       to activation at input gate.
2538      * * 54: The forward forget layer normalization weights. Optional.
2539      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2540      *       to activation at forget gate.
2541      * * 55: The forward cell layer normalization weights. Optional.
2542      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2543      *       to activation at cell gate.
2544      * * 56: The forward output layer normalization weights. Optional.
2545      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2546      *       to activation at output gate.
2547      * * 57: The backward input layer normalization weights. Optional.
2548      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2549      *       to activation at input gate.
2550      * * 58: The backward forget layer normalization weights. Optional.
2551      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2552      *       to activation at forget gate.
2553      * * 59: The backward cell layer normalization weights. Optional.
2554      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2555      *       to activation at cell gate.
2556      * * 60: The backward output layer normalization weights. Optional.
2557      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2558      *       to activation at output gate.
2559      *
2560      * Outputs:
2561      * * 0: The forward output.
2562      *      A 3-D tensor of shape:
2563      *        If time-major and not merge_outputs:
2564      *          [max_time, batch_size, fw_output_size]
2565      *        If time-major and merge_outputs:
2566      *          [max_time, batch_size, fw_output_size + bw_output_size]
2567      *        If batch-major and not merge_outputs:
2568      *          [batch_size, max_time, fw_output_size]
2569      *        If batch-major and merge_outputs:
2570      *          [batch_size, max_time, fw_output_size + bw_output_size]
2571      * * 1: The backward output.  Unused if merge_outputs is true.
2572      *      A 3-D tensor of shape:
2573      *        If time-major: [max_time, batch_size, bw_output_size]
2574      *        If batch-major: [batch_size, max_time, bw_output_size]
2575      * * 2: The forward activation state output.
2576      *      A 2-D tensor of shape [batch_size, fw_output_size] containing an
2577      *      activation state from the last time step in the sequence. This
2578      *      output is optional and can be omitted. If this output is present
2579      *      then outputs 3-5 must be present as well.
2580      *      Available since HAL version 1.3.
2581      * * 3: The forward cell state output.
2582      *      A tensor of shape [batch_size, fw_cell_size] containing a cell state
2583      *      from the last time step in the sequence. This output is optional
2584      *      and can be omitted. If this output is present
2585      *      then outputs 2, 4, 5 must be present as well.
2586      *      Available since HAL version 1.3.
2587      * * 4: The backward activation state output.
2588      *      A 2-D tensor of shape [batch_size, bw_output_size] containing an
2589      *      activation state from the last time step in the sequence. This
2590      *      output is optional and can be omitted. If this output is present
2591      *      then outputs 2, 3, 5 must be present as well.
2592      *      Available since HAL version 1.3.
2593      * * 5: The backward cell state output.
2594      *      A tensor of shape [batch_size, bw_cell_size] containing a cell state
2595      *      from the last time step in the sequence. This output is optional
2596      *      and can be omitted. If this output is present
2597      *      then outputs 2-4 must be present as well.
2598      *      Available since HAL version 1.3.
2599      */
2600     BIDIRECTIONAL_SEQUENCE_LSTM = 42,
2601 
2602     /**
2603      * A recurrent neural network layer that applies a basic RNN cell to a
2604      * sequence of inputs in forward and backward directions.
2605      *
2606      * This Op unrolls the input along the sequence dimension, and implements
2607      * the following operation for each element in the sequence s =
2608      * 1...sequence_length:
2609      *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
2610      *          fw_state * fw_recurrent_weights’ + fw_bias)
2611      *
2612      * And for each element in sequence t = sequence_length : 1
2613      *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
2614      *          bw_state * bw_recurrent_weights’ + bw_bias)
2615      *
2616      * Where:
2617      * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
2618      * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
2619      *    current “state” which itself is the output from the previous time step
2620      *    computation;
2621      * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
2622      *    batch);
2623      * * “activation” is the function passed as the “fused_activation_function”
2624      *   argument (if not “NONE”).
2625      *
2626      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2627      * one input into the two RNN cells in the following way:
2628      *
2629      *       INPUT  (INPUT_REVERSED)
2630      *         |         |
2631      *    ---------------------
2632      *    | FW_RNN     BW_RNN |
2633      *    ---------------------
2634      *         |         |
2635      *      FW_OUT     BW_OUT
2636      *
2637      * An op with cross-linking takes two inputs and feeds them into the RNN
2638      * cells in the following way:
2639      *
2640      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2641      *           |             |
2642      *     INPUT | (INPUT_R'D.)|
2643      *       |   |       |     |
2644      *    -----------------------
2645      *    |  \  /        \    / |
2646      *    | FW_RNN       BW_RNN |
2647      *    -----------------------
2648      *         |           |
2649      *      FW_OUT      BW_OUT
2650      *
2651      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2652      * weights are present. While stacking this op on top of itself, this
2653      * allows to connect both forward and backward outputs from previous cell
2654      * to the next cell's input.
2655      *
2656      * Since HAL version 1.3 parallel linking mode is supported. The mode is
2657      * enabled if auxiliary input is present but auxiliary weights are omitted.
2658      * In this case, the cell feeds inputs into the RNN in the following way:
2659      *
2660      *       INPUT (AUX_INPUT_REVERSED)
2661      *         |         |
2662      *    ---------------------
2663      *    | FW_RNN     BW_RNN |
2664      *    ---------------------
2665      *         |         |
2666      *      FW_OUT     BW_OUT
2667      *
2668      * While stacking this op on top of itself, this allows to connect both
2669      * forward and backward outputs from previous cell to the next cell's
2670      * corresponding inputs.
2671      *
2672      * Supported tensor {@link OperandType}:
2673      * * {@link OperandType::TENSOR_FLOAT16}
2674      * * {@link OperandType::TENSOR_FLOAT32}
2675      *
2676      * The input tensors must all be the same type.
2677      *
2678      * Inputs:
2679      * * 0: input.
2680      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
2681      *      it is set to true, then the input has a shape [maxTime, batchSize,
2682      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
2683      *      inputSize].
2684      * * 1: fwWeights.
2685      *      A 2-D tensor of shape [fwNumUnits, inputSize].
2686      * * 2: fwRecurrentWeights.
2687      *      A 2-D tensor of shape [fwNumUnits, fwNumUnits].
2688      * * 3: fwBias.
2689      *      A 1-D tensor of shape [fwNumUnits].
2690      * * 4: fwHiddenState.
2691      *      A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
2692      *      state input for the first time step of the computation.
2693      * * 5: bwWeights.
2694      *      A 2-D tensor of shape [bwNumUnits, inputSize].
2695      * * 6: bwRecurrentWeights.
2696      *      A 2-D tensor of shape [bwNumUnits, bwNumUnits].
2697      * * 7: bwBias.
2698      *      A 1-D tensor of shape [bwNumUnits].
2699      * * 8: bwHiddenState
2700      *      A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
2701      *      state input for the first time step of the computation.
2702      * * 9: auxInput.
2703      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
2704      *      it is set to true, then the input has a shape [maxTime, batchSize,
2705      *      auxInputSize], otherwise the input has a shape [batchSize, maxTime,
2706      *      auxInputSize]. Can be omitted. See the docs above for the usage
2707      *      modes explanation.
2708      * * 10:fwAuxWeights.
2709      *      A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted.
2710      *      See the docs above for the usage modes explanation.
2711      * * 11:bwAuxWeights.
2712      *      A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted.
2713      *      See the docs above for the usage modes explanation.
2714      * * 12:fusedActivationFunction.
2715      *      A {@link FusedActivationFunc} value indicating the activation function. If
2716      *      “NONE” is specified then it results in a linear activation.
2717      * * 13:timeMajor
2718      *      An {@link OperandType::BOOL} scalar specifying the shape format
2719      *      of input and output tensors.
2720      * * 14:mergeOutputs
2721      *      An {@link OperandType::BOOL} scalar specifying if the outputs
2722      *      from forward and backward cells are separate (if set to false) or
2723      *      concatenated (if set to true).
2724      * Outputs:
2725      * * 0: fwOutput.
2726      *      A 3-D tensor. The first two dimensions of the shape are defined by
2727      *      the input 6 (timeMajor) and the third dimension is defined by the
2728      *      input 14 (mergeOutputs). If timeMajor is set to true, then the first
2729      *      two dimensions are [maxTime, batchSize], otherwise they are set to
2730      *      [batchSize, maxTime]. If mergeOutputs is set to true, then the third
2731      *      dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
2732      *      to fwNumUnits.
2733      * * 1: bwOutput.
2734      *      A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
2735      *      this tensor is not produced. The shape is defined by the input 6
2736      *      (timeMajor). If it is set to true, then the shape is set to
2737      *      [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
2738      *      [batchSize, maxTime, bwNumUnits].
2739      * * 2: The forward hidden state output.
2740      *      A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden
2741      *      state from the last time step in the sequence. This output is
2742      *      optional and can be omitted. If this output is present then output
2743      *      3 must be present as well.
2744      *      Available since HAL version 1.3.
2745      * * 3: The backward hidden state output.
2746      *      A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden
2747      *      state from the last time step in the sequence. This output is
2748      *      optional and can be omitted. If this output is present then output
2749      *      2 must be present as well.
2750      *      Available since HAL version 1.3.
2751      */
2752     BIDIRECTIONAL_SEQUENCE_RNN = 43,
2753 
2754     /**
2755      * Greedily selects a subset of bounding boxes in descending order of score.
2756      *
2757      * This op applies NMS algorithm to each class. In each loop of execution,
2758      * the box with maximum score gets selected and removed from the pending set.
2759      * The scores of the rest of boxes are lowered according to the
2760      * intersection-over-union (IOU) overlapping with the previously selected
2761      * boxes and a specified NMS kernel method. Any boxes with score less
2762      * than a threshold are removed from the pending set.
2763      *
2764      * Three NMS kernels are supported:
2765      * * Hard:     score_new = score_old * (1 if IoU < threshold else 0)
2766      * * Linear:   score_new = score_old * (1 if IoU < threshold else 1 - IoU)
2767      * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
2768      *
2769      * Axis-aligned bounding boxes are represented by its upper-left corner
2770      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
2771      * bounding box should satisfy x1 <= x2 and y1 <= y2.
2772      *
2773      * Supported tensor {@link OperandType}:
2774      * * {@link OperandType::TENSOR_FLOAT16}
2775      * * {@link OperandType::TENSOR_FLOAT32}
2776      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2777      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2778      *
2779      * Inputs:
2780      * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
2781      *      of each bounding box proposal. The boxes are grouped by batches in the
2782      *      first dimension. Zero num_rois is supported for this tensor.
2783      * * 1: A 2-D Tensor specifying the bounding boxes of shape
2784      *      [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
2785      *      The boxes are grouped by batches in the first dimension. The sequential
2786      *      order of the boxes corresponds with input0. For input0 of type
2787      *      {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
2788      *      {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
2789      *      scale of 0.125.
2790      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
2791      *      this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
2792      *      with zeroPoint of -128 and scale of 0.125.
2793      *      Zero num_rois is supported for this tensor.
2794      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
2795      *      [num_rois], specifying the batch index of each box. Boxes with
2796      *      the same batch index are grouped together.
2797      * * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
2798      *      with scores lower than the threshold are filtered before sending
2799      *      to the NMS algorithm.
2800      * * 4: An {@link OperandType::INT32} scalar, specifying the maximum
2801      *      number of selected bounding boxes for each image. Set to a negative
2802      *      value for unlimited number of output bounding boxes.
2803      * * 5: An {@link OperandType::INT32} scalar, specifying the NMS
2804      *      kernel method, options are 0:hard, 1:linear, 2:gaussian.
2805      * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
2806      *      threshold in hard and linear NMS kernel. This field is ignored if
2807      *      gaussian kernel is selected.
2808      * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
2809      *      gaussian NMS kernel. This field is ignored if gaussian kernel is
2810      *      not selected.
2811      * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
2812      *      Boxes with scores lower than the threshold are dropped during the
2813      *      score updating phase in soft NMS.
2814      *
2815      * Outputs:
2816      * * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape
2817      *      [num_output_rois], specifying the score of each output box. The boxes
2818      *      are grouped by batches, but the sequential order in each batch is not
2819      *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM},
2820      *      guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM}
2821      *      or {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
2822      *      the scale and zero point must be the same as input0.
2823      * * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
2824      *      [num_output_rois, 4], specifying the coordinates of each
2825      *      output bounding box with the same format as input1. The sequential
2826      *      order of the boxes corresponds with output0. For type of
2827      *      {@link OperandType::TENSOR_QUANT16_ASYMM}, the scale must be
2828      *      0.125 and the zero point must be 0.
2829      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
2830      *      [num_output_rois], specifying the class of each output box. The
2831      *      sequential order of the boxes corresponds with output0.
2832      * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
2833      *      [num_output_rois], specifying the batch index of each box. Boxes
2834      *      with the same batch index are grouped together.
2835      */
2836     BOX_WITH_NMS_LIMIT = 44,
2837 
2838     /**
2839      * Casts a tensor to a type.
2840      *
2841      * This operation ignores the scale and zeroPoint of quanized tensors,
2842      * e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
2843      * as a tensor of uint8 values.
2844      *
2845      * Supported tensor {@link OperandType}:
2846      * * {@link OperandType::TENSOR_FLOAT16}
2847      * * {@link OperandType::TENSOR_FLOAT32}
2848      * * {@link OperandType::TENSOR_INT32}
2849      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2850      * Since HAL version 1.3, casting tensors of the following
2851      * {@link OperandType} to the same {@link OperandType} is supported:
2852      * * {@link OperandType::TENSOR_BOOL8}
2853      * * {@link OperandType::TENSOR_INT32}
2854      * * {@link OperandType::TENSOR_QUANT16_ASYMM}
2855      * * {@link OperandType::TENSOR_QUANT16_SYMM}
2856      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
2857      * * {@link OperandType::TENSOR_QUANT8_SYMM}
2858      *
2859      * Supported tensor rank: from 1
2860      *
2861      * Inputs:
2862      * * 0: A tensor.
2863      *
2864      * Outputs:
2865      * * 0: A tensor with the same shape as input0.
2866      */
2867     CAST = 45,
2868 
2869     /**
2870      * Shuffle the channels of the input tensor.
2871      *
2872      * Given an input tensor and a integer value of num_groups, CHANNEL_SHUFFLE
2873      * divide the channel dimension into num_groups groups, and reorganize the
2874      * channels by grouping channels with the same index in each group.
2875      *
2876      * Along the channel dimension, the output is calculated using this formula:
2877      *
2878      *     output_channel[k * num_groups + g] = input_channel[g * group_size + k]
2879      *
2880      * where group_size = num_channels / num_groups
2881      *
2882      * The number of channels must be divisible by num_groups.
2883      *
2884      * Supported tensor {@link OperandType}:
2885      * * {@link OperandType::TENSOR_FLOAT16}
2886      * * {@link OperandType::TENSOR_FLOAT32}
2887      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2888      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2889      *
2890      * Supported tensor rank: up to 4
2891      *
2892      * Inputs:
2893      * * 0: An n-D tensor, specifying the tensor to be shuffled.
2894      * * 1: An {@link OperandType::INT32} scalar, specifying the number of
2895      *      groups.
2896      * * 2: An {@link OperandType::INT32} scalar, specifying the dimension
2897      *      channel shuffle would be performed on. Negative index is used to
2898      *      specify axis from the end (e.g. -1 for the last axis). Must be in
2899      *      the range [-n, n).
2900      *
2901      * Outputs:
2902      * * 0: A tensor of the same {@link OperandType} and same shape as input0.
2903      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
2904      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2905      *      the scale and zeroPoint must be the same as input0.
2906      */
2907     CHANNEL_SHUFFLE = 46,
2908 
2909     /**
2910      * Apply postprocessing steps to bounding box detections.
2911      *
2912      * Bounding box detections are generated by applying transformation on a set
2913      * of predefined anchors with the bounding box deltas from bounding box
2914      * regression. A final step of hard NMS is applied to limit the number of
2915      * returned boxes.
2916      *
2917      * Supported tensor {@link OperandType}:
2918      * * {@link OperandType::TENSOR_FLOAT16}
2919      * * {@link OperandType::TENSOR_FLOAT32}
2920      *
2921      * Inputs:
2922      * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
2923      *      the score of each anchor with each class. Class 0 for each
2924      *      [batches, num_anchors, 0] is background and will be ignored.
2925      * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
2926      *      the first four values in length_box_encoding specifying the bounding
2927      *      box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
2928      *      where dy and dx is the linear-scale relative correction factor for the
2929      *      center position of the bounding box with respect to the width and height,
2930      *      dh and dw is the log-scale relative correction factor for the width and
2931      *      height. All the entries in length_box_encoding beyond the first four
2932      *      values are ignored in this operation.
2933      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
2934      *      predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
2935      *      ctr_x are the center position of the box, and h and w are the height
2936      *      and the width.
2937      * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scaling
2938      *      factor for dy in bounding box deltas.
2939      * * 4: An {@link OperandType::FLOAT32} scalar, specifying the scaling
2940      *      factor for dx in bounding box deltas.
2941      * * 5: An {@link OperandType::FLOAT32} scalar, specifying the scaling
2942      *      factor for dh in bounding box deltas.
2943      * * 6: An {@link OperandType::FLOAT32} scalar, specifying the scaling
2944      *      factor for dw in bounding box deltas.
2945      * * 7: An {@link OperandType::BOOL} scalar, set to true to use regular
2946      *      multi-class NMS algorithm that do NMS separately for each class,
2947      *      set to false for a faster algorithm that only do one single NMS
2948      *      using the highest class score..
2949      * * 8: An {@link OperandType::INT32} scalar, max_num_detections, specifying
2950      *      the maximum number of boxes for the output. Boxes with the lowest
2951      *      scores are discarded to meet the limit.
2952      * * 9: An {@link OperandType::INT32} scalar, only used when input7 is
2953      *      set to false, specifying the maximum number of classes per detection.
2954      * * 10: An {@link OperandType::INT32} scalar, only used when input7 is
2955      *       set to true, specifying the maximum number of detections when
2956      *       applying NMS algorithm for each single class.
2957      * * 11: A scalar, score_threshold. Boxes with scores lower than the
2958      *       threshold are filtered before sending to the NMS algorithm. The
2959      *       scalar must be of {@link OperandType::FLOAT16} if input0 is of
2960      *       {@link OperandType::TENSOR_FLOAT16} and of
2961      *       {@link OperandType::FLOAT32} if input0 is of
2962      *       {@link OperandType::TENSOR_FLOAT32}.
2963      * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
2964      *       must be of {@link OperandType::FLOAT16} if input0 is of
2965      *       {@link OperandType::TENSOR_FLOAT16} and of
2966      *       {@link OperandType::FLOAT32} if input0 is of
2967      *       {@link OperandType::TENSOR_FLOAT32}.
2968      * * 13: An {@link OperandType::BOOL} scalar, set to true to include
2969      *       background class in the list of label map for the output, set
2970      *       to false to not include the background. When the background
2971      *       class is included, it has label 0 and the output classes start
2972      *       at 1 in the label map, otherwise, the output classes start at 0.
2973      *
2974      * Outputs:
2975      * * 0: A 2-D tensor of the same {@link OperandType} as input0, with shape
2976      *      [batches, max_num_detections], specifying the score of each output
2977      *      detections.
2978      * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
2979      *      coordinates of each output bounding box, with format
2980      *      [y1, x1, y2, x2].
2981      * * 2: A 2-D {@link OperandType::TENSOR_INT32} tensor, of shape
2982      *      [batches, max_num_detections], specifying the class label for each
2983      *      output detection.
2984      * * 3: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape [batches],
2985      *      specifying the number of valid output detections for each batch.
2986      */
2987     DETECTION_POSTPROCESSING = 47,
2988 
2989     /**
2990      * For input tensors x and y, computes x == y elementwise.
2991      *
2992      * Supported tensor {@link OperandType}:
2993      * * {@link OperandType::TENSOR_BOOL8}
2994      * * {@link OperandType::TENSOR_FLOAT16}
2995      * * {@link OperandType::TENSOR_FLOAT32}
2996      * * {@link OperandType::TENSOR_INT32}
2997      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
2998      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
2999      *
3000      * Supported tensor rank: from 1
3001      *
3002      * This operation supports broadcasting.
3003      *
3004      * Inputs:
3005      * * 0: A tensor.
3006      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3007      *      with input0.
3008      *
3009      * Outputs:
3010      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3011      */
3012     EQUAL = 48,
3013 
3014     /**
3015      * Computes exponential of x element-wise.
3016      *
3017      * Supported tensor {@link OperandType}:
3018      * * {@link OperandType::TENSOR_FLOAT16}
3019      * * {@link OperandType::TENSOR_FLOAT32}
3020      *
3021      * Supported tensor rank: from 1.
3022      *
3023      * Inputs:
3024      * * 0: A tensor.
3025      *
3026      * Outputs:
3027      * * 0: The output tensor of same shape as input0.
3028      */
3029     EXP = 49,
3030 
3031     /**
3032      * Inserts a dimension of 1 into a tensor's shape.
3033      *
3034      * Given a tensor input, this operation inserts a dimension of 1 at the
3035      * given dimension index of input's shape. The dimension index starts at
3036      * zero; if you specify a negative dimension index, it is counted backward
3037      * from the end.
3038      *
3039      * Supported tensor {@link OperandType}:
3040      * * {@link OperandType::TENSOR_FLOAT16}
3041      * * {@link OperandType::TENSOR_FLOAT32}
3042      * * {@link OperandType::TENSOR_INT32}
3043      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3044      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3045      *
3046      * Supported tensor rank: from 1
3047      *
3048      * Inputs:
3049      * * 0: An n-D tensor.
3050      * * 1: An {@link OperandType::INT32} scalar specifying the dimension
3051      *      index to expand. Must be in the range [-(n + 1), (n + 1)).
3052      *
3053      * Outputs:
3054      * * 0: An (n + 1)-D tensor with the same {@link OperandType} and data as
3055      *      input0.
3056      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3057      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3058      *      the scale and zeroPoint must be the same as input0.
3059      */
3060     EXPAND_DIMS = 50,
3061 
3062     /**
3063      * Gathers values along an axis.
3064      *
3065      * Produces an output tensor with shape
3066      *     input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
3067      * where:
3068      *     # Vector indices (output is rank(input0)).
3069      *     output[a_0, ..., a_n, i, b_0, ..., b_n] =
3070      *       input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
3071      *
3072      *     # Higher rank indices (output is rank(input0) + rank(indices) - 1).
3073      *     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
3074      *       input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
3075      *
3076      * Supported tensor {@link OperandType}:
3077      * * {@link OperandType::TENSOR_FLOAT16}
3078      * * {@link OperandType::TENSOR_FLOAT32}
3079      * * {@link OperandType::TENSOR_INT32}
3080      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3081      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3082      *
3083      * Supported tensor rank: from 1
3084      *
3085      * Inputs:
3086      * * 0: An n-D tensor from which to gather values.
3087      * * 1: An {@link OperandType::INT32} scalar specifying the axis.
3088      *      Negative index is used to specify axis from the end
3089      *      (e.g. -1 for the last axis). Must be in the range [-n, n).
3090      * * 2: A k-D tensor {@link OperandType::TENSOR_INT32} of indices.
3091      *      The values must be in the bounds of the corresponding dimensions
3092      *      of input0.
3093      *
3094      * Outputs:
3095      * * 0: An (n + k - 1)-D tensor with the same {@link OperandType} as input0.
3096      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3097      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3098      *      the scale and zeroPoint must be the same as input0.
3099      */
3100     GATHER = 51,
3101 
3102     /**
3103      * Generate aixs-aligned bounding box proposals.
3104      *
3105      * Bounding box proposals are generated by applying transformation on a set
3106      * of predefined anchors with the bounding box deltas from bounding box
3107      * regression. A final step of hard NMS is applied to limit the number of
3108      * returned boxes.
3109      *
3110      * Axis-aligned bounding boxes are represented by its upper-left corner
3111      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3112      * bounding box should satisfy x1 <= x2 and y1 <= y2.
3113      *
3114      * Supported tensor {@link OperandType}:
3115      * * {@link OperandType::TENSOR_FLOAT16}
3116      * * {@link OperandType::TENSOR_FLOAT32}
3117      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3118      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3119      *
3120      * Inputs:
3121      * * 0: A 4-D Tensor specifying the score of each anchor at each
3122      *      location. With "NHWC" data layout, the tensor shape is
3123      *      [batches, height, width, num_anchors]. With "NCHW" data layout,
3124      *      the tensor shape is [batches, num_anchors, height, width].
3125      * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
3126      *      layout, the tensor shape is [batches, height, width, num_anchors * 4].
3127      *      With "NCHW" data layout, the tensor shape is
3128      *      [batches, num_anchors * 4, height, width]. The box deltas are encoded
3129      *      in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale
3130      *      relative correction factor for the center position of the bounding box
3131      *      with respect to the width and height, dw and dh is the log-scale
3132      *      relative correction factor for the width and height. The last
3133      *      dimensions is the channel dimension.
3134      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3135      *      predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
3136      *      {@link OperandType::TENSOR_QUANT8_ASYMM} or
3137      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of
3138      *      {@link OperandType::TENSOR_QUANT16_SYMM}, with scale of 0.125.
3139      * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
3140      *      each image in the batch, with format [image_height, image_width].
3141      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} or
3142      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this
3143      *      tensor should be of {@link OperandType::TENSOR_QUANT16_SYMM}, with
3144      *      scale of 0.125.
3145      * * 4: An {@link OperandType::FLOAT32} scalar, specifying the ratio
3146      *      from the height of original image to the height of feature map.
3147      * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
3148      *      from the width of original image to the width of feature map.
3149      * * 6: An {@link OperandType::INT32} scalar, specifying the maximum
3150      *      number of boxes before going into the hard NMS algorithm. Boxes
3151      *      with the lowest scores are discarded to meet the limit. Set to
3152      *      a non-positive value for unlimited number.
3153      * * 7: An {@link OperandType::INT32} scalar, specifying the maximum
3154      *      number of boxes returning from the hard NMS algorithm. Boxes
3155      *      with the lowest scores are discarded to meet the limit. Set to
3156      *      a non-positive value for unlimited number.
3157      * * 8: An {@link OperandType::FLOAT32} scalar, specifying the IoU
3158      *      threshold for hard NMS.
3159      * * 9: An {@link OperandType::FLOAT32} scalar, min_size. Boxes with
3160      *      height or width lower than the absolute threshold are filtered out.
3161      * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
3162      *       NCHW data layout for input0 and input1. Set to false for NHWC.
3163      *
3164      * Outputs:
3165      * * 0: A tensor of the same {@link OperandType} as input0, of shape
3166      *      [num_output_rois], specifying the score of each output box.
3167      *      The boxes are grouped by batches, but the sequential order in
3168      *      each batch is not guaranteed. For type of
3169      *      {@link OperandType::TENSOR_QUANT8_ASYMM} or
3170      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero
3171      *      point must be the same as input0.
3172      * * 1: A tensor of the same {@link OperandType} as input3, of shape
3173      *      [num_output_rois, 4], specifying the coordinates of each output
3174      *      bounding box for each class, with format [x1, y1, x2, y2].
3175      *      The sequential order of the boxes corresponds with output0.
3176      *      For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
3177      *      scale must be 0.125 and the zero point must be 0.
3178      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
3179      *      [num_output_rois], specifying the batch index of each box. Boxes
3180      *      with the same batch index are grouped together.
3181      */
3182     GENERATE_PROPOSALS = 52,
3183 
3184     /**
3185      * For input tensors x and y, computes x > y elementwise.
3186      *
3187      * Supported tensor {@link OperandType}:
3188      * * {@link OperandType::TENSOR_BOOL8}
3189      * * {@link OperandType::TENSOR_FLOAT16}
3190      * * {@link OperandType::TENSOR_FLOAT32}
3191      * * {@link OperandType::TENSOR_INT32}
3192      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3193      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3194      *
3195      * Supported tensor rank: from 1
3196      *
3197      * This operation supports broadcasting.
3198      *
3199      * Inputs:
3200      * * 0: A tensor.
3201      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3202      *      with input0.
3203      *
3204      * Outputs:
3205      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3206      */
3207     GREATER = 53,
3208     /**
3209      * For input tensors x and y, computes x >= y elementwise.
3210      *
3211      * Supported tensor {@link OperandType}:
3212      * * {@link OperandType::TENSOR_BOOL8}
3213      * * {@link OperandType::TENSOR_FLOAT16}
3214      * * {@link OperandType::TENSOR_FLOAT32}
3215      * * {@link OperandType::TENSOR_INT32}
3216      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3217      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3218      *
3219      * Supported tensor rank: from 1
3220      *
3221      * This operation supports broadcasting.
3222      *
3223      * Inputs:
3224      * * 0: A tensor.
3225      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3226      *      with input0.
3227      *
3228      * Outputs:
3229      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3230      */
3231     GREATER_EQUAL = 54,
3232 
3233     /**
3234      * Performs a grouped 2-D convolution operation.
3235      *
3236      * Given an input tensor of shape [batches, height, width, depth_in] and a
3237      * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
3238      * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
3239      * applies a group of different filters to each input channel group, then
3240      * concatenates the results together.
3241      *
3242      * Specifically, the input channels are divided into num_groups groups, each with
3243      * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
3244      * filters are also divided into num_groups groups, i.e. depth_out is divisible
3245      * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
3246      * input channel group, and the result are concatenated together.
3247      *
3248      * The output dimensions are functions of the filter dimensions, stride, and
3249      * padding.
3250      *
3251      * The values in the output tensor are computed as:
3252      *
3253      *     output[b, i, j, g * channel_multiplier + q] =
3254      *         sum_{di, dj, dk} (
3255      *             input[b, strides[1] * i + di, strides[2] * j + dj,
3256      *                   g * depth_group + dk] *
3257      *             filter[g * channel_multiplier + q, di, dj, dk]
3258      *         ) + bias[channel]
3259      *
3260      * where channel_multiplier = depth_out / num_groups
3261      *
3262      * Supported tensor {@link OperandType} configurations:
3263      * * 16 bit floating point:
3264      * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
3265      *
3266      * * 32 bit floating point:
3267      * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
3268      *
3269      * * Quantized:
3270      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
3271      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
3272      * * * input.scale * filter.scale).
3273      *
3274      * * Quantized signed (since HAL version 1.3):
3275      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
3276      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
3277      * * * input.scale * filter.scale).
3278      *
3279      * * Quantized with symmetric per channel quantization for the filter:
3280      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
3281      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3282      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
3283      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3284      *
3285      * * Quantized signed with filter symmetric per channel quantization
3286      *   (since HAL version 1.3):
3287      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
3288      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3289      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
3290      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3291      *
3292      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3293      * With the default data layout NHWC, the data is stored in the order of:
3294      * [batch, height, width, channels]. Alternatively, the data layout could
3295      * be NCHW, the data storage order of: [batch, channels, height, width].
3296      *
3297      * Both explicit padding and implicit padding are supported.
3298      *
3299      * Inputs (explicit padding):
3300      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3301      *      specifying the input, where depth_in = num_groups * depth_group.
3302      * * 1: A 4-D tensor, of shape
3303      *      [depth_out, filter_height, filter_width, depth_group], specifying
3304      *      the filter, where depth_out must be divisible by num_groups.  For
3305      *      tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
3306      *      the channel dimension (channelDim at
3307      *      {@link SymmPerChannelQuantParams}) must be set to 0.
3308      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3309      *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
3310      *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
3311      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
3312      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
3313      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
3314      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3315      *      of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3316      *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
3317      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3318      *      bias_scale[i] = input_scale * filter_scale[i].
3319      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
3320      *      the left, in the ‘width’ dimension.
3321      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
3322      *      the right, in the ‘width’ dimension.
3323      * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
3324      *      the top, in the ‘height’ dimension.
3325      * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
3326      *      the bottom, in the ‘height’ dimension.
3327      * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
3328      *      walking through input in the ‘width’ dimension.
3329      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
3330      *      walking through input in the ‘height’ dimension.
3331      * * 9: An {@link OperandType::INT32} scalar, specifying the number of
3332      *      groups.
3333      * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
3334      *       {@link FusedActivationFunc} values. Specifies the activation to
3335      *       invoke on the result.
3336      * * 11: An {@link OperandType::BOOL} scalar, set to true to specify
3337      *       NCHW data layout for input0 and output0. Set to false for NHWC.
3338      *
3339      * Inputs (implicit padding):
3340      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3341      *      specifying the input, where depth_in = num_groups * depth_group.
3342      * * 1: A 4-D tensor, of shape
3343      *      [depth_out, filter_height, filter_width, depth_group], specifying
3344      *      the filter, where depth_out must be divisible by num_groups.  For
3345      *      tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
3346      *      the channel dimension (SymmPerChannelQuantParams::channelDim)
3347      *      must be set to 0.
3348      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3349      *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
3350      *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
3351      *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type.
3352      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
3353      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
3354      *      the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
3355      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3356      *      of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3357      *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
3358      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3359      *      bias_scale[i] = input_scale * filter_scale[i].
3360      * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
3361      *      padding scheme, has to be one of the
3362      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
3363      * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
3364      *      walking through input in the ‘width’ dimension.
3365      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
3366      *      walking through input in the ‘height’ dimension.
3367      * * 6: An {@link OperandType::INT32} scalar, specifying the number of
3368      *      groups.
3369      * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
3370      *      {@link FusedActivationFunc} values. Specifies the activation to
3371      *      invoke on the result.
3372      * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
3373      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3374      *
3375      * Outputs:
3376      * * 0: The output 4-D tensor, of shape
3377      *      [batches, out_height, out_width, depth_out].
3378      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3379      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3380      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3381      */
3382     GROUPED_CONV_2D = 55,
3383 
3384     /**
3385      * Localize the maximum keypoints from heatmaps.
3386      *
3387      * This operation approximates the accurate maximum keypoint scores and
3388      * indices after bicubic upscaling by using Taylor expansion up to the
3389      * quadratic term.
3390      *
3391      * The bounding box is represented by its upper-left corner coordinate
3392      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
3393      * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
3394      *
3395      * Supported tensor {@link OperandType}:
3396      * * {@link OperandType::TENSOR_FLOAT16}
3397      * * {@link OperandType::TENSOR_FLOAT32}
3398      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3399      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3400      *
3401      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3402      * With the default data layout NHWC, the data is stored in the order of:
3403      * [batch, height, width, channels]. Alternatively, the data layout could
3404      * be NCHW, the data storage order of: [batch, channels, height, width].
3405      *
3406      * Inputs:
3407      * * 0: A 4-D Tensor of shape
3408      *      [num_boxes, heatmap_size, heatmap_size, num_keypoints],
3409      *      specifying the heatmaps, the height and width of heatmaps should
3410      *      be the same, and must be greater than or equal to 2.
3411      * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
3412      *      each with format [x1, y1, x2, y2]. For input0 of type
3413      *      {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should
3414      *      be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint
3415      *      of 0 and scale of 0.125.
3416      *      For input0 of type
3417      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this tensor
3418      *      should be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with
3419      *      zeroPoint of -128 and scale of 0.125.
3420      * * 2: An {@link OperandType::BOOL} scalar, set to true to specify
3421      *      NCHW data layout for input0. Set to false for NHWC.
3422      *
3423      * Outputs:
3424      * * 0: A tensor of the same {@link OperandType} as input0, with shape
3425      *      [num_boxes, num_keypoints], specifying score of the keypoints.
3426      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} or
3427      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3428      *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
3429      * * 1: A tensor of the same {@link OperandType} as input1, with shape
3430      *      [num_boxes, num_keypoints, 2], specifying the location of
3431      *      the keypoints, the second dimension is organized as
3432      *      [keypoint_x, keypoint_y].
3433      *      For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
3434      *      scale must be 0.125 and the zero point must be 0.
3435      */
3436     HEATMAP_MAX_KEYPOINT = 56,
3437 
3438     /**
3439      * Applies instance normalization to the input tensor.
3440      *
3441      * The values in the output tensor are computed as:
3442      *
3443      *     output[b, h, w, c] =
3444      *         (input[b, h, w, c] - mean[b, c]) * gamma /
3445      *         sqrt(var[b, c] + epsilon) + beta
3446      *
3447      * Where the mean and variance are computed across the spatial dimensions:
3448      *
3449      *     mean[b, c] =
3450      *         sum_{h, w}(input[b, h, w, c]) / sum(1)
3451      *
3452      *     var[b, c] =
3453      *         sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
3454      *
3455      * Supported tensor {@link OperandType}:
3456      * * {@link OperandType::TENSOR_FLOAT16}
3457      * * {@link OperandType::TENSOR_FLOAT32}
3458      *
3459      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3460      * With the default data layout NHWC, the data is stored in the order of:
3461      * [batch, height, width, channels]. Alternatively, the data layout could
3462      * be NCHW, the data storage order of: [batch, channels, height, width].
3463      *
3464      * Inputs:
3465      * * 0: An n-D tensor, specifying the tensor to be normalized.
3466      * * 1: A scalar, specifying gamma, the scale applied to the normalized
3467      *      tensor. The scalar must be of {@link OperandType::FLOAT16} if
3468      *      input0 is of {@link OperandType::TENSOR_FLOAT16} and of
3469      *      {@link OperandType::FLOAT32} if input0 is of
3470      *      {@link OperandType::TENSOR_FLOAT32}.
3471      * * 2: A scalar, specifying beta, the offset applied to the normalized
3472      *      tensor. The scalar must be of {@link OperandType::FLOAT16} if
3473      *      input0 is of {@link OperandType::TENSOR_FLOAT16} and of
3474      *      {@link OperandType::FLOAT32} if input0 is of
3475      *      {@link OperandType::TENSOR_FLOAT32}.
3476      * * 3: A scalar, specifying epsilon, the small value added to variance to
3477      *      avoid dividing by zero. The scalar must be of {@link OperandType::FLOAT16} if
3478      *      input0 is of {@link OperandType::TENSOR_FLOAT16} and of
3479      *      {@link OperandType::FLOAT32} if input0 is of
3480      *      {@link OperandType::TENSOR_FLOAT32}.
3481      * * 4: An {@link OperandType::BOOL} scalar, set to true to specify
3482      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3483      *
3484      * Outputs:
3485      * * 0: A tensor of the same {@link OperandType} and same shape as input0.
3486      */
3487     INSTANCE_NORMALIZATION = 57,
3488 
3489     /**
3490      * For input tensors x and y, computes x < y elementwise.
3491      *
3492      * Supported tensor {@link OperandType}:
3493      * * {@link OperandType::TENSOR_BOOL8}
3494      * * {@link OperandType::TENSOR_FLOAT16}
3495      * * {@link OperandType::TENSOR_FLOAT32}
3496      * * {@link OperandType::TENSOR_INT32}
3497      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3498      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3499      *
3500      * Supported tensor rank: from 1
3501      *
3502      * This operation supports broadcasting.
3503      *
3504      * Inputs:
3505      * * 0: A tensor.
3506      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3507      *      with input0.
3508      *
3509      * Outputs:
3510      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3511      */
3512     LESS = 58,
3513 
3514     /**
3515      * For input tensors x and y, computes x <= y elementwise.
3516      *
3517      * Supported tensor {@link OperandType}:
3518      * * {@link OperandType::TENSOR_BOOL8}
3519      * * {@link OperandType::TENSOR_FLOAT16}
3520      * * {@link OperandType::TENSOR_FLOAT32}
3521      * * {@link OperandType::TENSOR_INT32}
3522      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3523      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3524      *
3525      * Supported tensor rank: from 1
3526      *
3527      * This operation supports broadcasting.
3528      *
3529      * Inputs:
3530      * * 0: A tensor.
3531      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3532      *      with input0.
3533      *
3534      * Outputs:
3535      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3536      */
3537     LESS_EQUAL = 59,
3538 
3539     /**
3540      * Computes natural logarithm of x element-wise.
3541      *
3542      * Supported tensor {@link OperandType}:
3543      * * {@link OperandType::TENSOR_FLOAT16}
3544      * * {@link OperandType::TENSOR_FLOAT32}
3545      *
3546      * Supported tensor rank: from 1.
3547      *
3548      * Inputs:
3549      * * 0: A tensor.
3550      *
3551      * Outputs:
3552      * * 0: The output tensor of same shape as input0.
3553      */
3554     LOG = 60,
3555 
3556     /**
3557      * Returns the truth value of x AND y element-wise.
3558      *
3559      * Supported tensor {@link OperandType}:
3560      * * {@link OperandType::TENSOR_BOOL8}
3561      *
3562      * Supported tensor rank: from 1
3563      *
3564      * This operation supports broadcasting.
3565      *
3566      * Inputs:
3567      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3568      * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
3569      *      compatible with input0.
3570      *
3571      * Outputs:
3572      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3573      */
3574     LOGICAL_AND = 61,
3575 
3576     /**
3577      * Computes the truth value of NOT x element-wise.
3578      *
3579      * Supported tensor {@link OperandType}:
3580      * * {@link OperandType::TENSOR_BOOL8}
3581      *
3582      * Supported tensor rank: from 1.
3583      *
3584      * Inputs:
3585      * * 0: A tensor.
3586      *
3587      * Outputs:
3588      * * 0: The output tensor of same shape as input0.
3589      */
3590     LOGICAL_NOT = 62,
3591 
3592     /**
3593      * Returns the truth value of x OR y element-wise.
3594      *
3595      * Supported tensor {@link OperandType}:
3596      * * {@link OperandType::TENSOR_BOOL8}
3597      *
3598      * Supported tensor rank: from 1
3599      *
3600      * This operation supports broadcasting.
3601      *
3602      * Inputs:
3603      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3604      * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
3605      *      compatible with input0.
3606      *
3607      * Outputs:
3608      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3609      */
3610     LOGICAL_OR = 63,
3611 
3612     /**
3613      * Computes the log softmax activations given logits.
3614      *
3615      * The output is calculated using this formula:
3616      *
3617      *     output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
3618      *
3619      * Supported tensor {@link OperandType}:
3620      * * {@link OperandType::TENSOR_FLOAT16}
3621      * * {@link OperandType::TENSOR_FLOAT32}
3622      *
3623      * Supported tensor rank: from 1.
3624      *
3625      * Inputs:
3626      * * 0: A tensor specifying the input logits.
3627      * * 1: A scalar, specifying the positive scaling factor for the exponent,
3628      *      beta.
3629      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
3630      *      value must be of {@link OperandType::FLOAT16}.
3631      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
3632      *      value must be of {@link OperandType::FLOAT32}.
3633      * * 2: An {@link OperandType::INT32} scalar specifying the axis to
3634      *      reduce across. Negative index is used to specify axis from the
3635      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
3636      *
3637      * Outputs:
3638      * * 0: The output tensor of the same {@link OperandType} and shape as
3639      *      input0.
3640      */
3641     LOG_SOFTMAX = 64,
3642 
3643     /**
3644      * Returns the element-wise maximum of two tensors.
3645      *
3646      * Supported tensor {@link OperandType}:
3647      * * {@link OperandType::TENSOR_FLOAT16}
3648      * * {@link OperandType::TENSOR_FLOAT32}
3649      * * {@link OperandType::TENSOR_INT32}
3650      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3651      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3652      *
3653      * Supported tensor rank: from 1.
3654      *
3655      * Inputs:
3656      * * 0: A tensor.
3657      * * 1: A tensor of the same {@link OperandType} and compatible dimensions
3658      *      with input0.
3659      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
3660      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
3661      *
3662      * Outputs:
3663      * * 0: A tensor of the same {@link OperandType} as input0.
3664      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3665      *      {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
3666      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3667      */
3668     MAXIMUM = 65,
3669 
3670     /**
3671      * Returns the element-wise minimum of two tensors.
3672      *
3673      * Supported tensor {@link OperandType}:
3674      * * {@link OperandType::TENSOR_FLOAT16}
3675      * * {@link OperandType::TENSOR_FLOAT32}
3676      * * {@link OperandType::TENSOR_INT32}
3677      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3678      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3679      *
3680      * Supported tensor rank: from 1.
3681      *
3682      * Inputs:
3683      * * 0: A tensor.
3684      * * 1: A tensor of the same {@link OperandType} and compatible dimensions
3685      *      with input0.
3686      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
3687      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
3688      *
3689      * Outputs:
3690      * * 0: A tensor of the same {@link OperandType} as input0.
3691      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3692      *      {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
3693      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3694      */
3695     MINIMUM = 66,
3696 
3697     /**
3698      * Computes numerical negative value element-wise.
3699      *
3700      * Supported tensor {@link OperandType}:
3701      * * {@link OperandType::TENSOR_FLOAT16}
3702      * * {@link OperandType::TENSOR_FLOAT32}
3703      * * {@link OperandType::TENSOR_INT32}
3704      *
3705      * Supported tensor rank: from 1.
3706      *
3707      * Inputs:
3708      * * 0: A tensor.
3709      *
3710      * Outputs:
3711      * * 0: The output tensor of same shape as input0.
3712      */
3713     NEG = 67,
3714 
3715     /**
3716      * For input tensors x and y, computes x != y elementwise.
3717      *
3718      * Supported tensor {@link OperandType}:
3719      * * {@link OperandType::TENSOR_BOOL8}
3720      * * {@link OperandType::TENSOR_FLOAT16}
3721      * * {@link OperandType::TENSOR_FLOAT32}
3722      * * {@link OperandType::TENSOR_INT32}
3723      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3724      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3725      *
3726      * Supported tensor rank: from 1
3727      *
3728      * This operation supports broadcasting.
3729      *
3730      * Inputs:
3731      * * 0: A tensor.
3732      * * 1: A tensor of the same {@link OperandType} and dimensions compatible
3733      *      with input0.
3734      *
3735      * Outputs:
3736      * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
3737      */
3738     NOT_EQUAL = 68,
3739 
3740     /**
3741      * Pads a tensor with the given constant value according to the specified
3742      * paddings.
3743      *
3744      * Supported tensor {@link OperandType}:
3745      * * {@link OperandType::TENSOR_FLOAT16}
3746      * * {@link OperandType::TENSOR_FLOAT32}
3747      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3748      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3749      *
3750      * Supported tensor rank: up to 4
3751      *
3752      * Inputs:
3753      * * 0: An n-D tensor, specifying the tensor to be padded.
3754      * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
3755      *      for each spatial dimension of the input tensor. The shape of the
3756      *      tensor must be {rank(input0), 2}.
3757      *      padding[i, 0] specifies the number of elements to be padded in the
3758      *      front of dimension i.
3759      *      padding[i, 1] specifies the number of elements to be padded after
3760      *      the end of dimension i.
3761      * * 2: A scalar specifying the value to use for padding input0.
3762      *      For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
3763      *      pad value must be of {@link OperandType::FLOAT16}.
3764      *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
3765      *      pad value must be of {@link OperandType::FLOAT32}.
3766      *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and
3767      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
3768      *      the pad value must be of {@link OperandType::INT32}. The
3769      *      scale and zeroPoint are assumed to be the same as in input0.
3770      *
3771      * Outputs:
3772      * * 0: A tensor of the same {@link OperandType} as input0. The
3773      *      output tensor has the same rank as input0, and each
3774      *      dimension of the output tensor has the same size as the
3775      *      corresponding dimension of the input tensor plus the size
3776      *      of the padding:
3777      *          output0.dimension[i] =
3778      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
3779      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3780      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3781      *      the scale and zeroPoint must be the same as input0.
3782      */
3783     PAD_V2 = 69,
3784 
3785     /**
3786      * Computes the power of one value to another.
3787      *
3788      * Given a tensor base and a tensor exponent, this operation computes
3789      * base^exponent elementwise.
3790      *
3791      * This operations supports broadcasting. The size of the output is the
3792      * maximum size along each dimension of the input operands. It starts with
3793      * the trailing dimensions, and works its way forward.
3794      *
3795      * For example:
3796      *     base.dimension     =    {4, 1, 2}
3797      *     exponent.dimension = {5, 4, 3, 1}
3798      *     output.dimension   = {5, 4, 3, 2}
3799      *
3800      * Supported tensor {@link OperandType}:
3801      * * {@link OperandType::TENSOR_FLOAT16}
3802      * * {@link OperandType::TENSOR_FLOAT32}
3803      *
3804      * Supported tensor rank: from 1
3805      *
3806      * Inputs:
3807      * * 0: A tensor specifying the base.
3808      * * 1: A tensor specifying the exponent.
3809      *
3810      * Outputs:
3811      * * 0: An output tensor.
3812      */
3813     POW = 70,
3814 
3815     /**
3816      * Parametric Rectified Linear Unit.
3817      *
3818      * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
3819      * is a learned array with the same {@link OperandType} and compatible
3820      * dimensions as input x.
3821      *
3822      * Two dimensions are compatible when:
3823      *     1. they are equal, or
3824      *     2. one of them is 1
3825      *
3826      * The size of the output is the maximum size along each dimension of the
3827      * input operands. It starts with the trailing dimensions, and works its way
3828      * forward.
3829      *
3830      * Example:
3831      *     input.dimension  =    {4, 1, 2}
3832      *     alpha.dimension  = {5, 4, 3, 1}
3833      *     output.dimension = {5, 4, 3, 2}
3834      *
3835      * Supported tensor {@link OperandType}:
3836      * * {@link OperandType::TENSOR_FLOAT16}
3837      * * {@link OperandType::TENSOR_FLOAT32}
3838      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3839      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3840      *
3841      * Supported tensor rank: from 1
3842      *
3843      * Inputs:
3844      * * 0: A tensor, specifying the input.
3845      * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
3846      *      as input0, specifying the alpha.
3847      *
3848      * Outputs:
3849      * * 0: A tensor of the same {@link OperandType} as input0.
3850      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
3851      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3852      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
3853      */
3854     PRELU = 71,
3855 
3856     /**
3857      * Quantizes the input tensor.
3858      *
3859      * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM} output tensor is:
3860      *
3861      *     output = max(0, min(255, round(input / scale) + zeroPoint)
3862      *
3863      * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} output
3864      * tensor is:
3865      *
3866      *     output = max(-128, min(127, round(input / scale) + zeroPoint)
3867      *
3868      * Supported input tensor {@link OperandType}:
3869      * * {@link OperandType::TENSOR_FLOAT16}
3870      * * {@link OperandType::TENSOR_FLOAT32}
3871      *
3872      * Supported output tensor {@link OperandType}:
3873      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
3874      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
3875      *
3876      * Supported tensor rank: from 1
3877      *
3878      * Inputs:
3879      * * 0: A tensor, may be zero-sized.
3880      *
3881      * Outputs:
3882      * * 0: The output tensor of same shape as input0, but with
3883      *      {@link OperandType::TENSOR_QUANT8_ASYMM} or.
3884      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}.
3885      */
3886     QUANTIZE = 72,
3887 
3888     /**
3889      * A version of quantized LSTM, using 16 bit quantization for internal
3890      * state.
3891      *
3892      * There is no projection layer, so cell state size is equal to the output
3893      * size.
3894      *
3895      * Inputs:
3896      * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3897      *      and shape [numBatches, inputSize] specifying the input to the LSTM
3898      *      cell. Tensor is quantized with a fixed quantization range of
3899      *      [-1, 127/128] (scale = 1/128, zeroPoint = 128).
3900      * * 1: The input-to-input weights.
3901      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3902      *      and shape [outputSize, inputSize] specifying input-to-input part of
3903      *      weights for fully-connected layer inside the LSTM cell.
3904      *      Quantization zero point and scale must be the same across all the
3905      *      weights.
3906      * * 2: The input-to-forget weights.
3907      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3908      *      and shape [outputSize, inputSize] specifying input-to-forget part of
3909      *      weights for fully-connected layer inside the LSTM cell.
3910      *      Quantization zero point and scale must be the same across all the
3911      *      weights.
3912      * * 3: The input-to-cell weights.
3913      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3914      *      and shape [outputSize, inputSize] specifying input-to-cell part of
3915      *      weights for fully-connected layer inside the LSTM cell.
3916      *      Quantization zero point and scale must be the same across all the
3917      *      weights.
3918      * * 4: The input-to-output weights.
3919      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3920      *      and shape [outputSize, inputSize] specifying input-to-output part of
3921      *      weights for fully-connected layer inside the LSTM cell.
3922      *      Quantization zero point and scale must be the same across all the
3923      *      weights.
3924      * * 5: The recurrent-to-input weights.
3925      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3926      *      and shape [outputSize, outputSize] specifying recurrent-to-input part
3927      *      of weights for fully-connected layer inside the LSTM cell.
3928      *      Quantization zero point and scale must be the same across all the
3929      *      weights.
3930      * * 6: The recurrent-to-forget weights.
3931      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3932      *      and shape [outputSize, outputSize] specifying recurrent-to-forget
3933      *      part of weights for fully-connected layer inside the LSTM cell.
3934      *      Quantization zero point and scale must be the same across all the
3935      *      weights.
3936      * * 7: The recurrent-to-cell weights.
3937      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3938      *      and shape [outputSize, outputSize] specifying recurrent-to-cell part
3939      *      of weights for fully-connected layer inside the LSTM cell.
3940      *      Quantization zero point and scale must be the same across all the
3941      *      weights.
3942      * * 8: The recurrent-to-output weights.
3943      *      A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3944      *      and shape [outputSize, outputSize] specifying recurrent-to-output
3945      *      part of weights for fully-connected layer inside the LSTM cell.
3946      *      Quantization zero point and scale must be the same across all the
3947      *      weights.
3948      * * 9: The input gate bias.
3949      *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
3950      *      [outputSize] specifying the bias for the fully-connected layer
3951      *      inside the LSTM cell. Bias is quantized with scale being a product
3952      *      of input and weights scales and zeroPoint equal to 0.
3953      * * 10:The forget gate bias.
3954      *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
3955      *      [outputSize] specifying the bias for the fully-connected layer
3956      *      inside the LSTM cell. Bias is quantized with scale being a product
3957      *      of input and weights scales and zeroPoint equal to 0.
3958      * * 11:The cell bias.
3959      *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
3960      *      [outputSize] specifying the bias for the fully-connected layer
3961      *      inside the LSTM cell. Bias is quantized with scale being a product
3962      *      of input and weights scales and zeroPoint equal to 0.
3963      * * 12:The output gate bias.
3964      *      A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
3965      *      [outputSize] specifying the bias for the fully-connected layer
3966      *      inside the LSTM cell. Bias is quantized with scale being a product
3967      *      of input and weights scales and zeroPoint equal to 0.
3968      * * 13: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
3969      *       and shape [numBatches, outputSize] specifying the cell state from the
3970      *       previous time step of the LSTM cell. It is quantized using a
3971      *       quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
3972      *       32768, zeroPoint = 0).
3973      * * 14: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3974      *       and shape [numBathes, outputSize] specifying the output of the LSTM
3975      *       cell from previous time-step. Tensor is quantized with a fixed
3976      *       quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
3977      *       128).
3978      *
3979      *
3980      * Outputs:
3981      * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
3982      *      and shape [numBatches, outputSize] which contains a cell state from
3983      *      the current time step. Tensor is quantized using a quantization
3984      *      range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
3985      *      0).
3986      * * 1: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
3987      *      and shape [numBathes, outputSize] which contains the output value.
3988      *      Tensor is quantized with a fixed quantization range of [-1, 127/128]
3989      *      (scale = 1/128, zeroPoint = 128).
3990      */
3991     QUANTIZED_16BIT_LSTM = 73,
3992 
3993     /**
3994      * Draws samples from a multinomial distribution.
3995      *
3996      * Supported tensor {@link OperandType}:
3997      * * {@link OperandType::TENSOR_FLOAT16}
3998      * * {@link OperandType::TENSOR_FLOAT32}
3999      *
4000      * Inputs:
4001      * * 0: A 2-D tensor with shape [batches, classes], specifying the
4002      *      unnormalized log-probabilities for all classes.
4003      * * 1: A scalar {@link OperandType::INT32}, specifying the number of
4004      *      independent samples to draw for each row slice.
4005      * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [2],
4006      *      specifying seeds used to initialize the random distribution. If both
4007      *      provided seeds are 0, both will be randomly generated.
4008      * Outputs:
4009      * * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape
4010      *      [batches, samples], containing the drawn samples.
4011      */
4012     RANDOM_MULTINOMIAL = 74,
4013 
4014     /**
4015      * Reduces a tensor by computing the "logical and" of elements along given
4016      * dimensions.
4017      *
4018      * If keep_dims is true, the reduced dimensions are
4019      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4020      * 1 for each entry in dimensions.
4021      *
4022      * Supported tensor {@link OperandType}:
4023      * * {@link OperandType::TENSOR_BOOL8}
4024      *
4025      * Supported tensor rank: up to 4
4026      *
4027      * Inputs:
4028      * * 0: An n-D tensor.
4029      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4030      *      to reduce. Dimension values must be in the range [-n, n).
4031      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4032      *      retains reduced dimensions with length 1.
4033      *
4034      * Outputs:
4035      * * 0: A tensor of the same {@link OperandType} as input0.
4036      *      If all dimensions are reduced and keep_dims is false, the output
4037      *      shape is [1].
4038      */
4039     REDUCE_ALL = 75,
4040 
4041     /**
4042      * Reduces a tensor by computing the "logical or" of elements along given
4043      * dimensions.
4044      *
4045      * If keep_dims is true, the reduced dimensions are
4046      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4047      * 1 for each entry in dimensions.
4048      *
4049      * Supported tensor {@link OperandType}:
4050      * * {@link OperandType::TENSOR_BOOL8}
4051      *
4052      * Supported tensor rank: up to 4
4053      *
4054      * Inputs:
4055      * * 0: An n-D tensor.
4056      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4057      *      to reduce. Dimension values must be in the range [-n, n).
4058      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4059      *      retains reduced dimensions with length 1.
4060      *
4061      * Outputs:
4062      * * 0: A tensor of the same {@link OperandType} as input0.
4063      *      If all dimensions are reduced and keep_dims is false, the output
4064      *      shape is [1].
4065      */
4066     REDUCE_ANY = 76,
4067 
4068     /**
4069      * Reduces a tensor by computing the maximum of elements along given
4070      * dimensions.
4071      *
4072      * If keep_dims is true, the reduced dimensions are
4073      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4074      * 1 for each entry in dimensions.
4075      *
4076      * Supported tensor {@link OperandType}:
4077      * * {@link OperandType::TENSOR_FLOAT16}
4078      * * {@link OperandType::TENSOR_FLOAT32}
4079      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4080      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4081      *
4082      * Supported tensor rank: up to 4
4083      *
4084      * Inputs:
4085      * * 0: An n-D tensor.
4086      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4087      *      to reduce. Dimension values must be in the range [-n, n).
4088      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4089      *      retains reduced dimensions with length 1.
4090      *
4091      * Outputs:
4092      * * 0: A tensor of the same {@link OperandType} as input0.
4093      *      If all dimensions are reduced and keep_dims is false, the output
4094      *      shape is [1].
4095      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4096      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4097      *      the scale and zeroPoint must be the same as input0.
4098      */
4099     REDUCE_MAX = 77,
4100 
4101     /**
4102      * Reduces a tensor by computing the minimum of elements along given
4103      * dimensions.
4104      *
4105      * If keep_dims is true, the reduced dimensions are
4106      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4107      * 1 for each entry in dimensions.
4108      *
4109      * Supported tensor {@link OperandType}:
4110      * * {@link OperandType::TENSOR_FLOAT16}
4111      * * {@link OperandType::TENSOR_FLOAT32}
4112      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4113      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4114      *
4115      * Supported tensor rank: up to 4
4116      *
4117      * Inputs:
4118      * * 0: An n-D tensor.
4119      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4120      *      to reduce. Dimension values must be in the range [-n, n).
4121      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4122      *      retains reduced dimensions with length 1.
4123      *
4124      * Outputs:
4125      * * 0: A tensor of the same {@link OperandType} as input0.
4126      *      If all dimensions are reduced and keep_dims is false, the output
4127      *      shape is [1].
4128      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4129      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4130      *      the scale and zeroPoint must be the same as input0.
4131      */
4132     REDUCE_MIN = 78,
4133 
4134     /**
4135      * Reduces a tensor by multiplying elements along given dimensions.
4136      *
4137      * If keep_dims is true, the reduced dimensions are
4138      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4139      * 1 for each entry in dimensions.
4140      *
4141      * Supported tensor {@link OperandType}:
4142      * * {@link OperandType::TENSOR_FLOAT16}
4143      * * {@link OperandType::TENSOR_FLOAT32}
4144      *
4145      * Supported tensor rank: up to 4
4146      *
4147      * Inputs:
4148      * * 0: An n-D tensor.
4149      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4150      *      to reduce. Dimension values must be in the range [-n, n).
4151      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4152      *      retains reduced dimensions with length 1.
4153      *
4154      * Outputs:
4155      * * 0: A tensor of the same {@link OperandType} as input0.
4156      *      If all dimensions are reduced and keep_dims is false, the output
4157      *      shape is [1].
4158      */
4159     REDUCE_PROD = 79,
4160 
4161     /**
4162      * Reduces a tensor by summing elements along given dimensions.
4163      *
4164      * If keep_dims is true, the reduced dimensions are
4165      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4166      * 1 for each entry in dimensions.
4167      *
4168      * Supported tensor {@link OperandType}:
4169      * * {@link OperandType::TENSOR_FLOAT16}
4170      * * {@link OperandType::TENSOR_FLOAT32}
4171      *
4172      * Supported tensor rank: up to 4
4173      *
4174      * Inputs:
4175      * * 0: An n-D tensor.
4176      * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
4177      *      to reduce. Dimension values must be in the range [-n, n).
4178      * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
4179      *      retains reduced dimensions with length 1.
4180      *
4181      * Outputs:
4182      * * 0: A tensor of the same {@link OperandType} as input0.
4183      *      If all dimensions are reduced and keep_dims is false, the output
4184      *      shape is [1].
4185      */
4186     REDUCE_SUM = 80,
4187 
4188     /**
4189      * Select and scale the feature map of each region of interest to a unified
4190      * output size by average pooling sampling points from bilinear interpolation.
4191      *
4192      * The region of interest is represented by its upper-left corner coordinate
4193      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4194      * A spatial scaling factor is applied to map into feature map coordinate.
4195      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4196      *
4197      * No rounding is applied in this operation. The sampling points are unified
4198      * distributed in the pooling bin and their values are calculated by bilinear
4199      * interpolation.
4200      *
4201      * Supported tensor {@link OperandType}:
4202      * * {@link OperandType::TENSOR_FLOAT16}
4203      * * {@link OperandType::TENSOR_FLOAT32}
4204      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4205      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4206      *
4207      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4208      * With the default data layout NHWC, the data is stored in the order of:
4209      * [batch, height, width, channels]. Alternatively, the data layout could
4210      * be NCHW, the data storage order of: [batch, channels, height, width].
4211      *
4212      * Inputs:
4213      * * 0: A 4-D tensor, specifying the feature map.
4214      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4215      *      the regions of interest, each line with format [x1, y1, x2, y2].
4216      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
4217      *      this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
4218      *      with zeroPoint of 0 and scale of 0.125. Zero num_rois is
4219      *      supported for this tensor.
4220      * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
4221      *      [num_rois], specifying the batch index of each box. Boxes with
4222      *      the same batch index are grouped together. Zero num_rois is
4223      *      supported for this tensor.
4224      * * 3: An {@link OperandType::INT32} scalar, specifying the output
4225      *      height of the output tensor.
4226      * * 4: An {@link OperandType::INT32} scalar, specifying the output
4227      *      width of the output tensor.
4228      * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
4229      *      from the height of original image to the height of feature map.
4230      * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
4231      *      from the width of original image to the width of feature map.
4232      * * 7: An {@link OperandType::INT32} scalar, specifying the number of
4233      *      sampling points in height dimension used to compute the output.
4234      *      Set to 0 for adaptive value of ceil(roi_height/out_height).
4235      * * 8: An {@link OperandType::INT32} scalar, specifying the number of
4236      *      sampling points in width dimension used to compute the output.
4237      *      Set to 0 for adaptive value of ceil(roi_width/out_width).
4238      * * 9: An {@link OperandType::BOOL} scalar, set to true to specify
4239      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4240      *
4241      * Outputs:
4242      * * 0: A tensor of the same {@link OperandType} as input0. The output
4243      *      shape is [num_rois, out_height, out_width, depth].
4244      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4245      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4246      *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
4247      */
4248     ROI_ALIGN = 81,
4249 
4250     /**
4251      * Select and scale the feature map of each region of interest to a unified
4252      * output size by max-pooling.
4253      *
4254      * The region of interest is represented by its upper-left corner coordinate
4255      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4256      * A spatial scaling factor is applied to map into feature map coordinate.
4257      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4258      *
4259      * Rounding is applied in this operation to ensure integer boundary for
4260      * regions of interest and pooling bins.
4261      *
4262      * Supported tensor {@link OperandType}:
4263      * * {@link OperandType::TENSOR_FLOAT16}
4264      * * {@link OperandType::TENSOR_FLOAT32}
4265      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4266      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4267      *
4268      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4269      * With the default data layout NHWC, the data is stored in the order of:
4270      * [batch, height, width, channels]. Alternatively, the data layout could
4271      * be NCHW, the data storage order of: [batch, channels, height, width].
4272      *
4273      * Inputs:
4274      * * 0: A 4-D tensor, specifying the feature map.
4275      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4276      *      the regions of interest, each line with format [x1, y1, x2, y2].
4277      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} and
4278      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4279      *      this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
4280      *      with zeroPoint of 0 and scale of 0.125.
4281      * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
4282      *      [num_rois], specifying the batch index of each box. Boxes with
4283      *      the same batch index are grouped together.
4284      * * 3: An {@link OperandType::INT32} scalar, specifying the output
4285      *      height of the output tensor.
4286      * * 4: An {@link OperandType::INT32} scalar, specifying the output
4287      *      width of the output tensor.
4288      * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
4289      *      from the height of original image to the height of feature map.
4290      * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
4291      *      from the width of original image to the width of feature map.
4292      * * 7: An {@link OperandType::BOOL} scalar, set to true to specify
4293      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4294      *
4295      * Outputs:
4296      * * 0: A tensor of the same {@link OperandType} as input0. The output
4297      *      shape is [num_rois, out_height, out_width, depth].
4298      *      For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} and
4299      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4300      *      the scale and zeroPoint must be the same as input0.
4301      */
4302     ROI_POOLING = 82,
4303 
4304     /**
4305      * Computes reciprocal of square root of x element-wise.
4306      *
4307      * Supported tensor {@link OperandType}:
4308      * * {@link OperandType::TENSOR_FLOAT16}
4309      * * {@link OperandType::TENSOR_FLOAT32}
4310      *
4311      * Supported tensor rank: from 1.
4312      *
4313      * Inputs:
4314      * * 0: A tensor.
4315      *
4316      * Outputs:
4317      * * 0: The output tensor of same shape as input0.
4318      */
4319     RSQRT = 83,
4320 
4321     /**
4322      * Using a tensor of booleans c and input tensors x and y select values
4323      * elementwise from both input tensors:
4324      *
4325      * O[i] = C[i] ? x[i] : y[i].
4326      *
4327      * Supported tensor {@link OperandType}:
4328      * * {@link OperandType::TENSOR_FLOAT16}
4329      * * {@link OperandType::TENSOR_FLOAT32}
4330      * * {@link OperandType::TENSOR_INT32}
4331      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4332      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4333      *
4334      * Supported tensor rank: from 1
4335      *
4336      * Inputs:
4337      * * 0: A tensor of type {@link OperandType::TENSOR_BOOL8} acting as a
4338      *      mask that chooses, based on the value at each element, whether the
4339      *      corresponding element in the output should be taken from input1 (if
4340      *      true) or input2 (if false).
4341      * * 1: An input tensor of the same shape as input0.
4342      * * 2: An input tensor of the same shape and type as input1.
4343      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM}
4344      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4345      *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
4346      *
4347      * Outputs:
4348      * * 0: A tensor of the same type and shape as input1 and input2.
4349      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
4350      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4351      */
4352     SELECT = 84,
4353 
4354     /**
4355      * Computes sin of x element-wise.
4356      *
4357      * Supported tensor {@link OperandType}:
4358      * * {@link OperandType::TENSOR_FLOAT16}
4359      * * {@link OperandType::TENSOR_FLOAT32}
4360      *
4361      * Supported tensor rank: from 1.
4362      *
4363      * Inputs:
4364      * * 0: A tensor.
4365      *
4366      * Outputs:
4367      * * 0: The output tensor of same shape as input0.
4368      */
4369     SIN = 85,
4370 
4371     /**
4372      * Extracts a slice of specified size from the input tensor starting at a
4373      * specified location.
4374      *
4375      * The starting location is specified as a 1-D tensor containing offsets
4376      * for each dimension. The size is specified as a 1-D tensor containing
4377      * either size of a slice along corresponding dimension or -1. In the latter
4378      * case, all the remaining elements in dimension are included in the slice.
4379      *
4380      * A sum of begin offset and a size of a slice must not exceed size of a
4381      * corresponding dimension.
4382      *
4383      * Supported tensor {@link OperandType}:
4384      * * {@link OperandType::TENSOR_FLOAT16}
4385      * * {@link OperandType::TENSOR_FLOAT32}
4386      * * {@link OperandType::TENSOR_INT32}
4387      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4388      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4389      *
4390      * Supported tensor rank: from 1
4391      *
4392      * Inputs:
4393      * * 0: An n-D tensor to take slice from, may be zero-sized.
4394      * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
4395      *      the beginning indices of the slice in each dimension.
4396      * * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
4397      *      the size of the slice in each dimension.
4398      *
4399      * Outputs:
4400      * * 0: An n-D tensor of the same type as the input containing the slice.
4401      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4402      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4403      *      its scale and zeroPoint has to be same as the input0 scale and zeroPoint.
4404      */
4405     SLICE = 86,
4406 
4407     /**
4408      * Splits a tensor along a given axis into num_splits subtensors.
4409      *
4410      * Supported tensor {@link OperandType}:
4411      * * {@link OperandType::TENSOR_FLOAT16}
4412      * * {@link OperandType::TENSOR_FLOAT32}
4413      * * {@link OperandType::TENSOR_INT32}
4414      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4415      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4416      *
4417      * Supported tensor rank: from 1
4418      *
4419      * Inputs:
4420      * * 0: An n-D tensor to split.
4421      * * 1: An {@link OperandType::INT32} scalar specifying the axis along
4422      *      which to split.
4423      * * 2: An {@link OperandType::INT32} scalar indicating the number of
4424      *      splits along given axis. Must evenly divide axis size.
4425      *
4426      * Outputs:
4427      * * 0 ~ (num_splits - 1): Resulting subtensors.
4428      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4429      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4430      *      the scale and zeroPoint must be the same as input0.
4431      */
4432     SPLIT = 87,
4433 
4434     /**
4435      * Computes square root of x element-wise.
4436      *
4437      * Supported tensor {@link OperandType}:
4438      * * {@link OperandType::TENSOR_FLOAT16}
4439      * * {@link OperandType::TENSOR_FLOAT32}
4440      *
4441      * Supported tensor rank: from 1.
4442      *
4443      * Inputs:
4444      * * 0: A tensor.
4445      *
4446      * Outputs:
4447      * * 0: The output tensor of same shape as input0.
4448      */
4449     SQRT = 88,
4450 
4451     /**
4452      * Constructs a tensor by tiling a given tensor.
4453      *
4454      * This operation creates a new tensor by replicating `input` `multiples`
4455      * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
4456      * elements, and the values of `input` are replicated `multiples[i]` times
4457      * along the i-th dimension.
4458      * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
4459      *
4460      * Supported tensor {@link OperandType}:
4461      * * {@link OperandType::TENSOR_FLOAT16}
4462      * * {@link OperandType::TENSOR_FLOAT32}
4463      * * {@link OperandType::TENSOR_INT32}
4464      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4465      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4466      *
4467      * Supported tensor rank: from 1
4468      *
4469      * Inputs:
4470      * * 0: input, an n-D tensor specifying the input.
4471      * * 1: multiples, a 1-D tensor of {@link OperandType::TENSOR_INT32}.
4472      *      The length of multiples must be n.
4473      *
4474      * Outputs:
4475      * * 0: A tiled tensor of the same {@link OperandType} and rank as `input`.
4476      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4477      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4478      *      the scale and zeroPoint must be the same as input0.
4479      */
4480     TILE = 89,
4481 
4482     /**
4483      * Finds values and indices of the k largest entries for the last dimension.
4484      *
4485      * Resulting values in each dimensions are sorted in descending order. If
4486      * two values are equal, the one with larger index appears first.
4487      *
4488      * Supported tensor {@link OperandType}:
4489      * * {@link OperandType::TENSOR_FLOAT16}
4490      * * {@link OperandType::TENSOR_FLOAT32}
4491      * * {@link OperandType::TENSOR_INT32}
4492      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4493      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
4494      *
4495      * Supported tensor rank: from 1
4496      *
4497      * Inputs:
4498      * * 0: input, an n-D tensor specifying the input.
4499      * * 1: k, an {@link OperandType::INT32} scalar, specifying the number of
4500      *      top elements to look for along the last dimension.
4501      *
4502      * Outputs:
4503      * * 0: An n-D tensor of the same type as the input, containing the k
4504      *      largest elements along each last dimensional slice.
4505      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4506      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4507      *      the scale and zeroPoint must be the same as input0.
4508      * * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32}
4509      *      containing the indices of values within the last dimension of input.
4510      */
4511     TOPK_V2 = 90,
4512 
4513     /**
4514      * Performs the transpose of 2-D convolution operation.
4515      *
4516      * This operation is sometimes called "deconvolution" after Deconvolutional
4517      * Networks, but is actually the transpose (gradient) of
4518      * {@link OperandType::CONV_2D} rather than an actual deconvolution.
4519      *
4520      * The output dimensions are functions of the filter dimensions, stride, and
4521      * padding.
4522      *
4523      * Supported tensor {@link OperandType} configurations:
4524      * * 16 bit floating point:
4525      * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
4526      *
4527      * * 32 bit floating point:
4528      * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
4529      *
4530      * * Quantized:
4531      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
4532      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
4533      * * * input.scale * filter.scale).
4534      *
4535      * * Quantized with symmetric per channel quantization for the filter:
4536      * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
4537      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4538      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
4539      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4540      *
4541      * Available since HAL version 1.3:
4542      * * Quantized signed (since HAL version 1.3):
4543      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
4544      * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
4545      * * * input.scale * filter.scale).
4546      *
4547      * * Quantized signed with filter symmetric per channel quantization
4548      *   (since HAL version 1.3):
4549      * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
4550      * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4551      * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
4552      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4553      *
4554      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4555      * With the default data layout NHWC, the data is stored in the order of:
4556      * [batch, height, width, channels]. Alternatively, the data layout could
4557      * be NCHW, the data storage order of: [batch, channels, height, width].
4558      *
4559      * Both explicit padding and implicit padding are supported.
4560      *
4561      * Inputs (explicit padding):
4562      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4563      *      specifying the input.
4564      * * 1: A 4-D tensor, of shape
4565      *      [depth_out, filter_height, filter_width, depth_in], specifying the
4566      *      filter. For tensor of type
4567      *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
4568      *      dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
4569      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4570      *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
4571      *      {@link OperandType::TENSOR_FLOAT16}, the bias must be of the
4572      *      same type.
4573      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
4574      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
4575      *      the bias should be of {@link OperandType::TENSOR_INT32},
4576      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
4577      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
4578      *      the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
4579      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
4580      *      bias_scale[i] = input_scale * filter_scale[i].
4581      * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
4582      *      the left, in the ‘width’ dimension.
4583      * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
4584      *      the right, in the ‘width’ dimension.
4585      * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
4586      *      the top, in the ‘height’ dimension.
4587      * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
4588      *      the bottom, in the ‘height’ dimension.
4589      * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
4590      *      walking through input in the ‘width’ dimension.
4591      * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
4592      *      walking through input in the ‘height’ dimension.
4593      * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
4594      *      {@link FusedActivationFunc} values. Specifies the activation to
4595      *      invoke on the result.
4596      * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
4597      *       NCHW data layout for input0 and output0. Set to false for NHWC.
4598      *
4599      * Inputs (implicit padding):
4600      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4601      *      specifying the input.
4602      * * 1: A 4-D tensor, of shape
4603      *      [depth_out, filter_height, filter_width, depth_in], specifying the
4604      *      filter. For tensor of type
4605      *      {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
4606      *      dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
4607      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4608      *      tensor of type {@link OperandType::TENSOR_FLOAT32} or
4609      *      {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
4610      *      same type.
4611      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
4612      *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
4613      *      the bias should be of {@link OperandType::TENSOR_INT32},
4614      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
4615      *      For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
4616      *      the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
4617      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
4618      *      bias_scale[i] = input_scale * filter_scale[i].
4619      * * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output
4620      *      tensor shape.
4621      * * 4: An {@link OperandType::INT32} scalar, specifying the implicit
4622      *      padding scheme, has to be one of the
4623      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
4624      * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
4625      *      walking through input in the ‘width’ dimension.
4626      * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
4627      *      walking through input in the ‘height’ dimension.
4628      * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
4629      *      {@link FusedActivationFunc} values. Specifies the activation to
4630      *      invoke on the result.
4631      * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
4632      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4633      *
4634      * Outputs:
4635      * * 0: The output 4-D tensor, of shape
4636      *      [batches, out_height, out_width, depth_out].
4637      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4638      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4639      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4640      */
4641     TRANSPOSE_CONV_2D = 91,
4642 
4643     /**
4644      * A recurrent neural network specified by an LSTM cell.
4645      *
4646      * Performs (fully) dynamic unrolling of input.
4647      *
4648      * This Op unrolls the input along the time dimension, and implements the
4649      * following operation for each element in the sequence
4650      * s = 1...sequence_length:
4651      *   outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
4652      *
4653      * Where LSTMOp is the LSTM op as in {@link OperandType::LSTM},
4654      * the "projection" is an optional projection layer from state and output
4655      * and the “activation” is the function passed as the
4656      * “fused_activation_function” argument (if not “NONE”).
4657      *
4658      * Supported tensor {@link OperandType}:
4659      * * {@link OperandType::TENSOR_FLOAT16}
4660      * * {@link OperandType::TENSOR_FLOAT32}
4661      *
4662      * Supported tensor rank: 3, either time-major or batch-major.
4663      *
4664      * All input and output tensors must be of the same type.
4665      *
4666      * Inputs:
4667      * * 0: The input (\f$x_t\f$).
4668      *      A 3-D tensor of shape:
4669      *        If time-major: [max_time, batch_size, input_size]
4670      *        If batch-major: [batch_size, max_time, input_size]
4671      *      where “max_time” is the number of timesteps (sequence length),
4672      *      “batch_size” corresponds to the batching dimension, and
4673      *      “input_size” is the size of the input.
4674      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
4675      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
4676      *      corresponds to the number of cell units.
4677      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
4678      *      A 2-D tensor of shape [num_units, input_size].
4679      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
4680      *      A 2-D tensor of shape [num_units, input_size].
4681      * * 4: The input-to-output weights (\f$W_{xo}\f$).
4682      *      A 2-D tensor of shape [num_units, input_size].
4683      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
4684      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
4685      *      corresponds to either the number of cell units (i.e., “num_units”),
4686      *      or the second dimension of the “projection_weights”, if defined.
4687      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
4688      *      A 2-D tensor of shape [num_units, output_size].
4689      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
4690      *      A 2-D tensor of shape [num_units, output_size].
4691      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
4692      *      A 2-D tensor of shape [num_units, output_size].
4693      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
4694      *      A 1-D tensor of shape [num_units].
4695      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
4696      *      A 1-D tensor of shape [num_units].
4697      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
4698      *      A 1-D tensor of shape [num_units].
4699      * * 12:The input gate bias (\f$b_i\f$). Optional.
4700      *      A 1-D tensor of shape [num_units].
4701      * * 13:The forget gate bias (\f$b_f\f$).
4702      *      A 1-D tensor of shape [num_units].
4703      * * 14:The cell bias (\f$b_c\f$).
4704      *      A 1-D tensor of shape [num_units].
4705      * * 15:The output gate bias (\f$b_o\f$).
4706      *      A 1-D tensor of shape [num_units].
4707      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
4708      *      A 2-D tensor of shape [output_size, num_units].
4709      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
4710      *      A 1-D tensor of shape [output_size].
4711      * * 18:The output state (in) (\f$h_{t-1}\f$).
4712      *      A 2-D tensor of shape [batch_size, output_size].
4713      * * 19:The cell state (in) (\f$C_{t-1}\f$).
4714      *      A 2-D tensor of shape [batch_size, num_units].
4715      * * 20:The activation function (\f$g\f$).
4716      *      A value indicating the activation function:
4717      *      <ul>
4718      *      <li>0: None;
4719      *      <li>1: Relu;
4720      *      <li>3: Relu6;
4721      *      <li>4: Tanh;
4722      *      <li>6: Sigmoid.
4723      *      </ul>
4724      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
4725      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
4726      *      then clipping is disabled.
4727      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
4728      *      projection layer, such that values are bound within
4729      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
4730      * * 23:Time-major if true, batch-major if false.
4731      * * 24:The input layer normalization weights. Optional.
4732      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
4733      *      to activation at input gate.
4734      * * 25:The forget layer normalization weights. Optional.
4735      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
4736      *      to activation at forget gate.
4737      * * 26:The cell layer normalization weights. Optional.
4738      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
4739      *      to activation at cell gate.
4740      * * 27:The output layer normalization weights. Optional.
4741      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
4742      *      to activation at output gate.
4743      *
4744      * Outputs:
4745      * * 0: The output (\f$o_t\f$).
4746      *      A 3-D tensor of shape:
4747      *        If time-major: [max_time, batch_size, output_size]
4748      *        If batch-major: [batch_size, max_time, output_size]
4749      * * 1: A tensor of shape [batch_size, output_size] containing a hidden
4750      *      state from the last time step in the sequence. This output is
4751      *      optional and can be omitted. If this output is present then
4752      *      output #2 must be present as well.
4753      *      Available since HAL version 1.3.
4754      * * 2: A tensor of shape [batch_size, cell_size] containing a cell state
4755      *      from the last time step in the sequence. This output is optional
4756      *      and can be omitted.
4757      *      Available since HAL version 1.3.
4758      */
4759     UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
4760 
4761     /**
4762      * A recurrent neural network layer that applies a basic RNN cell to a
4763      * sequence of inputs.
4764      *
4765      * This layer unrolls the input along the sequence dimension, and implements
4766      * the following operation
4767      * for each element in the sequence s = 1...sequence_length:
4768      *   outputs[s] = state = activation(inputs[s] * input_weights’ + state *
4769      *   recurrent_weights’ + bias)
4770      *
4771      * Where:
4772      * * “input_weights” is a weight matrix that multiplies the inputs;
4773      * * “recurrent_weights” is a weight matrix that multiplies the current
4774      *    “state” which itself is the output from the previous time step
4775      *    computation;
4776      * * “bias” is a bias vector (added to each output vector in the batch);
4777      * * “activation” is the function passed as the “fused_activation_function”
4778      *   argument (if not “NONE”).
4779      *
4780      * Supported tensor {@link OperandType}:
4781      * * {@link OperandType::TENSOR_FLOAT16}
4782      * * {@link OperandType::TENSOR_FLOAT32}
4783      *
4784      * The input tensors must all be the same type.
4785      *
4786      * Inputs:
4787      * * 0: input.
4788      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
4789      *      it is set to 1, then the input has a shape [maxTime, batchSize,
4790      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
4791      *      inputSize].
4792      * * 1: weights.
4793      *      A 2-D tensor of shape [numUnits, inputSize].
4794      * * 2: recurrent_weights.
4795      *      A 2-D tensor of shape [numUnits, numUnits].
4796      * * 3: bias.
4797      *      A 1-D tensor of shape [numUnits].
4798      * * 4: hidden state
4799      *      A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
4800      *      state input for the first time step of the computation.
4801      * * 5: fusedActivationFunction.
4802      *      A {@link FusedActivationFunc} value indicating the activation function. If
4803      *      “NONE” is specified then it results in a linear activation.
4804      * * 6: timeMajor
4805      *      An {@link OperandType::INT32} scalar specifying the shape format
4806      *      of input and output tensors. Must be set to either 0 or 1.
4807      * Outputs:
4808      * * 0: output.
4809      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
4810      *      it is set to 1, then the output has a shape [maxTime, batchSize,
4811      *      numUnits], otherwise the output has a shape [batchSize, maxTime,
4812      *      numUnits].
4813      */
4814     UNIDIRECTIONAL_SEQUENCE_RNN = 93,
4815 
4816     /**
4817      * Resizes images to given size using the nearest neighbor interpretation.
4818      *
4819      * Resized images must be distorted if their output aspect ratio is not the
4820      * same as input aspect ratio. The corner pixels of output may not be the
4821      * same as corner pixels of input.
4822      *
4823      * Supported tensor {@link OperandType}:
4824      * * {@link OperandType::TENSOR_FLOAT16}
4825      * * {@link OperandType::TENSOR_FLOAT32}
4826      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
4827      *
4828      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4829      * With the default data layout NHWC, the data is stored in the order of:
4830      * [batch, height, width, channels]. Alternatively, the data layout could
4831      * be NCHW, the data storage order of: [batch, channels, height, width].
4832      *
4833      * Both resizing by shape and resizing by scale are supported.
4834      *
4835      * Inputs (resizing by shape):
4836      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
4837      *      the input. Zero batches is supported for this tensor.
4838      * * 1: An {@link OperandType::INT32} scalar, specifying the output
4839      *      width of the output tensor.
4840      * * 2: An {@link OperandType::INT32} scalar, specifying the output
4841      *      height of the output tensor.
4842      * * 3: An {@link OperandType::BOOL} scalar, default to false.
4843      *      Set to true to specify NCHW data layout for input0 and output0.
4844      * * 4: Align corners. An optional {@link OperandType::BOOL}
4845      *      scalar, default to false.  If True, the centers of the 4 corner
4846      *      pixels of the input and output tensors are aligned, preserving the
4847      *      values at the corner pixels.
4848      *      Available since HAL version 1.3.
4849      * * 5: Half pixel centers. An optional {@link OperandType::BOOL}
4850      *      scalar, default to false. If True, the pixel centers are assumed to
4851      *      be at (0.5, 0.5). This is the default behavior of image.resize in
4852      *      TF 2.0. If this parameter is True, then align_corners parameter
4853      *      must be False.
4854      *      Available since HAL version 1.3.
4855      *
4856      * Inputs (resizing by scale):
4857      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
4858      *      the input. Zero batches is supported for this tensor.
4859      * * 1: A scalar, specifying width_scale, the scaling factor of the width
4860      *      dimension from the input tensor to the output tensor. The output
4861      *      width is calculated as new_width = floor(width * width_scale).
4862      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
4863      *      of {@link OperandType::TENSOR_FLOAT16} and of
4864      *      {@link OperandType::FLOAT32} otherwise.
4865      * * 2: A scalar, specifying height_scale, the scaling factor of the height
4866      *      dimension from the input tensor to the output tensor. The output
4867      *      height is calculated as new_height = floor(height * height_scale).
4868      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
4869      *      of {@link OperandType::TENSOR_FLOAT16} and of
4870      *      {@link OperandType::FLOAT32} otherwise.
4871      * * 3: An {@link OperandType::BOOL} scalar, default to false.
4872      *      Set to true to specify NCHW data layout for input0 and output0.
4873      * * 4: Align corners. An optional {@link OperandType::BOOL}
4874      *      scalar, default to false.  If True, the centers of the 4 corner
4875      *      pixels of the input and output tensors are aligned, preserving the
4876      *      values at the corner pixels.
4877      *      Available since HAL version 1.3.
4878      * * 5: Half pixel centers. An optional {@link OperandType::BOOL}
4879      *      scalar, default to false. If True, the pixel centers are assumed to
4880      *      be at (0.5, 0.5). This is the default behavior of image.resize in
4881      *      TF 2.0. If this parameter is True, then align_corners parameter
4882      *      must be False.
4883      *      Available since HAL version 1.3.
4884      *
4885      * Outputs:
4886      * * 0: The output 4-D tensor, of shape
4887      *      [batches, new_height, new_width, depth].
4888      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
4889      *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4890      *      the scale and zeroPoint must be the same as input0.
4891      */
4892     RESIZE_NEAREST_NEIGHBOR = 94,
4893 
4894     /**
4895      * Quantized version of {@link OperationType::LSTM}.
4896      *
4897      * The input and the output use asymmetric quantized types, while the rest
4898      * use symmetric ones.
4899      *
4900      * Inputs:
4901      * * 0: The input to the LSTM cell.
4902      *      Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
4903      *      Shape: [batchSize, inputSize]
4904      * * 1: The input-to-input weights. Optional.
4905      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4906      *      Shape: [numUnits, inputSize]
4907      * * 2: The input-to-forget weights.
4908      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4909      *      Shape: [numUnits, inputSize]
4910      * * 3: The input-to-cell weights.
4911      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4912      *      Shape: [numUnits, inputSize]
4913      * * 4: The input-to-output weights.
4914      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4915      *      Shape: [numUnits, inputSize]
4916      * * 5: The recurrent-to-input weights. Optional.
4917      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4918      *      Shape: [numUnits, outputSize]
4919      * * 6: The recurrent-to-forget weights.
4920      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4921      *      Shape: [numUnits, outputSize]
4922      * * 7: The recurrent-to-cell weights.
4923      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4924      *      Shape: [numUnits, outputSize]
4925      * * 8: The recurrent-to-output weights.
4926      *      Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4927      *      Shape: [numUnits, outputSize]
4928      * * 9: The cell-to-input weights (for peephole). Optional.
4929      *      Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4930      *      Shape: [numUnits]
4931      * * 10: The cell-to-forget weights (for peephole). Optional.
4932      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4933      *       Shape: [numUnits]
4934      * * 11: The cell-to-output weights (for peephole). Optional.
4935      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4936      *       Shape: [numUnits]
4937      * * 12: The input gate bias. Quantized with scale being the
4938      *       product of input and weights scales and zeroPoint equal to 0.
4939      *       Optional.
4940      *       Type: {@link OperandType::TENSOR_INT32}
4941      *       Shape: [numUnits]
4942      * * 13: The forget gate bias. Quantized with scale being the
4943      *       product of input and weights scales and zeroPoint equal to 0.
4944      *       Type: {@link OperandType::TENSOR_INT32}
4945      *       Shape: [numUnits]
4946      * * 14: The cell bias. Quantized with scale being the
4947      *       product of input and weights scales and zeroPoint equal to 0.
4948      *       Type: {@link OperandType::TENSOR_INT32}
4949      *       Shape: [numUnits]
4950      * * 15: The output gate bias. Quantized with scale being the
4951      *       product of input and weights scales and zeroPoint equal to 0.
4952      *       Type: {@link OperandType::TENSOR_INT32}
4953      *       Shape: [numUnits]
4954      * * 16: The projection weights. Optional.
4955      *       Type: {@link OperandType::TENSOR_QUANT8_SYMM}
4956      *       Shape: [outputSize, numUnits]
4957      * * 17: The projection bias. Quantized with scale being the
4958      *       product of input and weights scales and zeroPoint equal to 0.
4959      *       Optional.
4960      *       Type: {@link OperandType::TENSOR_INT32}
4961      *       Shape: [outputSize]
4962      * * 18: The output from the previous time step.
4963      *       Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
4964      *       Shape: [batchSize, outputSize]
4965      * * 19: The cell state from the previous time step.
4966      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4967      *       Shape: [batchSize, numUnits]
4968      * * 20: The input layer normalization weights. Used to rescale
4969      *       normalized inputs to activation at input gate. Optional.
4970      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4971      *       Shape: [numUnits]
4972      * * 21: The forget layer normalization weights. Used to
4973      *       rescale normalized inputs to activation at forget gate. Optional.
4974      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4975      *       Shape: [numUnits]
4976      * * 22: The cell layer normalization weights. Used to rescale
4977      *       normalized inputs to activation at cell gate. Optional.
4978      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4979      *       Shape: [numUnits]
4980      * * 23: The output layer normalization weights. Used to
4981      *       rescale normalized inputs to activation at output gate. Optional.
4982      *       Type: {@link OperandType::TENSOR_QUANT16_SYMM}
4983      *       Shape: [numUnits]
4984      * * 24: The cell clip. If provided the cell state is clipped
4985      *       by this value prior to the cell output activation. Optional.
4986      *       Type: {@link OperandType::FLOAT32}.
4987      * * 25: The projection clip. If provided and projection is enabled,
4988      *       this is used for clipping the projected values. Optional.
4989      *       Type: {@link OperandType::FLOAT32}.
4990      * * 26: The scale of the intermediate result of matmul,
4991      *       i.e. input to layer normalization, at input gate.
4992      *       Type: {@link OperandType::FLOAT32}.
4993      * * 27: The scale of the intermediate result of matmul,
4994      *       i.e. input to layer normalization, at forget gate.
4995      *       Type: {@link OperandType::FLOAT32}.
4996      * * 28: The scale of the intermediate result of matmul,
4997      *       i.e. input to layer normalization, at cell gate.
4998      *       Type: {@link OperandType::FLOAT32}.
4999      * * 29: The scale of the intermediate result of matmul,
5000      *       i.e. input to layer normalization, at output gate.
5001      *       Type: {@link OperandType::FLOAT32}.
5002      * * 30: The zero point of the hidden state, i.e. input to
5003      *       projection.
5004      *       Type: {@link OperandType::INT32}.
5005      * * 31: The scale of the hidden state, i.e. input to
5006      *       projection.
5007      *       Type: {@link OperandType::FLOAT32}.
5008      *
5009      * Outputs:
5010      * * 0: The output state (out).
5011      *      Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5012      *      Shape: [batchSize, outputSize]
5013      * * 1: The cell state (out).
5014      *      Type: {@link OperandType::TENSOR_QUANT16_SYMM}
5015      *      Shape: [batchSize, numUnits]
5016      * * 2: The output. This is effectively the same as the current
5017      *      "output state (out)" value.
5018      *      Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5019      *      Shape: [batchSize, outputSize]
5020      */
5021     QUANTIZED_LSTM = 95,
5022 
5023     /**
5024      * Executes one of the two referenced subgraphs as determined by a boolean
5025      * value.
5026      *
5027      * The inputs and outputs of the two referenced subgraphs must agree with the
5028      * signature of this operation. That is, if the operation has (3 + n) inputs
5029      * and m outputs, both subgraphs must have n inputs and m outputs with the same
5030      * types, ranks, dimensions, scales,
5031      * zeroPoints, and extraParams as the corresponding operation
5032      * inputs and outputs.
5033      * All of the operands mentioned must have fully specified dimensions.
5034      *
5035      * Inputs:
5036      * * 0: A value of type {@link OperandType::TENSOR_BOOL8} and shape [1]
5037      *      that determines which of the two referenced subgraphs to execute.
5038      *      The operand must have fully specified dimensions.
5039      * * 1: A {@link OperandType::SUBGRAPH} reference to the subgraph to be
5040      *      executed if the condition is true.
5041      * * 2: A {@link OperandType::SUBGRAPH} reference to the subgraph to be
5042      *      executed if the condition is false.
5043      * * 3 ~ (n + 2): Inputs to be passed to the subgraph selected for execution.
5044      *
5045      * Outputs:
5046      * * 0 ~ (m - 1): Outputs produced by the selected subgraph.
5047      */
5048     IF = 96,
5049 
5050     /**
5051      * Executes the body subgraph until the condition subgraph outputs false.
5052      *
5053      * The inputs to this operation are the condition subgraph, the body subgraph,
5054      * and operand values for the first iteration of the loop. The values are
5055      * implicitly split into three groups of input-output, state-only, and
5056      * input-only values, as described below.
5057      *
5058      * The outputs of this operation are the final values of input-output
5059      * operands.
5060      *
5061      * Both the condition and body subgraph receive (m + k + n) inputs.
5062      * * The first m (m >= 1) inputs are input-output operands. For the first
5063      *   iteration, these are initialized from the corresponding inputs of the
5064      *   WHILE operation. In subsequent iterations, their values come from the
5065      *   corresponding outputs of the body subgraph produced during the previous
5066      *   iteration.
5067      * * The next k (k >= 0) inputs are state-only operands. They are similar to
5068      *   the input-output operands, except that their values are no longer
5069      *   available after the loop terminates.
5070      * * The last n (n >= 0) inputs are input-only operands. Their values come
5071      *   from the corresponding inputs of the WHILE operation.
5072      *
5073      * The body subgraph produces (m + k) outputs.
5074      * * The first m outputs are input-output operands. They become the outputs
5075      *   of the WHILE operation when a termination condition is reached.
5076      * * The last k outputs are state-only operands. Their values are no longer
5077      *   available after the loop terminates.
5078      *
5079      * The numbers m, k, and n are inferred by the driver as follows:
5080      *     m = (WHILE operation output count)
5081      *     k = (body subgraph output count) - m
5082      *     n = (body subgraph input count) - m - k
5083      *
5084      * The pseudo-code below illustrates the flow of a WHILE operation with
5085      * inputs condition, body, initial_input_output, initial_state, input_only
5086      * (m = 1, k = 1, n = 1):
5087      *
5088      *     input_output = initial_input_output
5089      *     state = initial_state
5090      *     while condition(input_output, state, input_only):
5091      *         input_output, state = body(input_output, state, input_only)
5092      *     return input_output
5093      *
5094      * Inputs:
5095      * * 0: A {@link OperandType::SUBGRAPH} reference to the condition
5096      *      subgraph. The subgraph must have (m + k + n) inputs with
5097      *      the same types, ranks, dimensions,
5098      *      scales, zeroPoints, and extraParams as the
5099      *      corresponding inputs of the WHILE operation and exactly one output
5100      *      of {@link OperandType::TENSOR_BOOL8} and shape [1].
5101      *      All of the operands mentioned must have fully specified dimensions.
5102      * * 1: A {@link OperandType::SUBGRAPH} reference to the body subgraph.
5103      *      The subgraph must have (m + k + n) inputs and (m + k) outputs with
5104      *      the same types, ranks, dimensions,
5105      *      scales, zeroPoints, and extraParams as the
5106      *      corresponding inputs and outputs of the WHILE operation.
5107      *      All of the operands mentioned must have fully specified dimensions.
5108      * * (m inputs): Initial values for input-output operands.
5109      * * (k inputs): Initial values for state-only operands.
5110      * * (n inputs): Values for input-only operands.
5111      *
5112      * Outputs:
5113      * * 0 ~ (m - 1): Outputs produced by the loop.
5114      */
5115     WHILE = 97,
5116 
5117     /**
5118      * Computes exponential linear activation on the input tensor element-wise.
5119      *
5120      * The output is calculated using the following formula:
5121      *
5122      *     ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
5123      *
5124      * Supported tensor {@link OperandType}:
5125      * * {@link OperandType::TENSOR_FLOAT16}
5126      * * {@link OperandType::TENSOR_FLOAT32}
5127      *
5128      * Supported tensor rank: from 1.
5129      *
5130      * Inputs:
5131      * * 0: A tensor, specifying the input. May be zero-sized.
5132      * * 1: A scalar, specifying the alpha parameter.
5133      *      For input tensor of {@link OperandType::TENSOR_FLOAT16},
5134      *      the alpha value must be of {@link OperandType::FLOAT16}.
5135      *      For input tensor of {@link OperandType::TENSOR_FLOAT32},
5136      *      the alpha value must be of {@link OperandType::FLOAT32}.
5137      *
5138      * Outputs:
5139      * * 0: The output tensor of same shape and type as input0.
5140      */
5141     ELU = 98,
5142 
5143     /**
5144      * Computes hard-swish activation on the input tensor element-wise.
5145      *
5146      * Hard swish activation is introduced in
5147      * https://arxiv.org/pdf/1905.02244.pdf
5148      *
5149      * The output is calculated using the following formula:
5150      *
5151      *     h-swish(x) = x * max(0, min(6, (x + 3))) / 6
5152 
5153      * Supported tensor {@link OperandType}:
5154      * * {@link OperandType::TENSOR_FLOAT16}
5155      * * {@link OperandType::TENSOR_FLOAT32}
5156      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
5157      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5158      *
5159      * Supported tensor rank: from 1.
5160      *
5161      * Inputs:
5162      * * 0: A tensor, specifying the input. May be zero-sized.
5163      *
5164      * Outputs:
5165      * * 0: The output tensor of same shape and type as input0.
5166      *      Scale and zero point of this tensor may be different from the input
5167      *      tensor's parameters.
5168      */
5169     HARD_SWISH = 99,
5170 
5171     /**
5172      * Creates a tensor filled with a scalar value.
5173      *
5174      * Supported output tensor {@link OperandType}:
5175      * * {@link OperandType::TENSOR_FLOAT16}
5176      * * {@link OperandType::TENSOR_FLOAT32}
5177      * * {@link OperandType::TENSOR_INT32}
5178      *
5179      * Supported tensor rank: from 1.
5180      *
5181      * Inputs:
5182      * * 0: A 1-D tensor, specifying the desired output tensor shape.
5183      * * 1: A scalar, specifying the value to fill the output tensors with.
5184      *      For output tensor of {@link OperandType::TENSOR_FLOAT16},
5185      *      the scalar must be of {@link OperandType::FLOAT16}.
5186      *      For output tensor of {@link OperandType::TENSOR_FLOAT32},
5187      *      the scalar must be of {@link OperandType::FLOAT32}.
5188      *      For output tensor of {@link OperandType::TENSOR_INT32},
5189      *      the scalar must be of {@link OperandType::INT32}.
5190      *
5191      * Outputs:
5192      * * 0: The output tensor.
5193      */
5194     FILL = 100,
5195 
5196     /**
5197      * Returns the rank of a tensor.
5198      *
5199      * The rank of a tensor is the number of dimensions in it. Also known as
5200      * "order", "degree", "ndims".
5201      *
5202      * Supported tensor {@link OperandType}:
5203      * * {@link OperandType::TENSOR_FLOAT16}
5204      * * {@link OperandType::TENSOR_FLOAT32}
5205      * * {@link OperandType::TENSOR_INT32}
5206      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
5207      * * {@link OperandType::TENSOR_QUANT16_SYMM}
5208      * * {@link OperandType::TENSOR_BOOL8}
5209      * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
5210      * * {@link OperandType::TENSOR_QUANT16_ASYMM}
5211      * * {@link OperandType::TENSOR_QUANT8_SYMM}
5212      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
5213      *
5214      * Supported tensor rank: from 1.
5215      *
5216      * Inputs:
5217      * * 0: The input tensor.
5218      *
5219      * Outputs:
5220      * * 0: A scalar of {@link OperandType::INT32}, specifying the rank
5221      *      of the input tensor.
5222      */
5223     RANK = 101,
5224 
5225     /**
5226      * DEPRECATED. Since HAL version 1.2, extensions are the preferred
5227      * alternative to OEM operation and data types.
5228      *
5229      * This operation is OEM specific. It should only be used for OEM
5230      * applications.
5231      */
5232     OEM_OPERATION = 10000,
5233 };
5234 
5235 }  // namespace android::nn
5236 
5237 #endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERATION_TYPES_H
5238