/* * Copyright (c) 2024 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @addtogroup NNRt * @{ * * @brief Provides a unified interface for AI chip drivers to access OpenHarmony. * Neural Network Runtime (NNRt) is a cross-chip inference computing runtime environment oriented to the AI field. * * @since 3.2 * @version 2.1 */ /** * @file NnrtTypes.idl * * @brief Defines the types used in the HDI methods. * * @since 3.2 * @version 2.1 */ /** * @brief Defines the package path of the NNRt module. * * @since 3.2 * @version 2.1 */ package ohos.hdi.nnrt.v2_1; /** * @brief Defines the shared memory data structure. * * @since 3.2 * @version 2.1 */ struct SharedBuffer { /** File descriptor of the shared memory. */ FileDescriptor fd; /** Size of the shared memory, in bytes. */ unsigned int bufferSize; /** Offset of the start address of the valid data in the shared memory. */ unsigned int offset; /** Space occupied by the valid data, in bytes. */ unsigned int dataSize; }; /** * @brief Enumerates the AI chip types. * * @since 3.2 * @version 2.1 */ enum DeviceType: int { /** Other types. */ OTHER, /** CPU chip. */ CPU, /** GPU chip. */ GPU, /** AI acceleration chip, such as NPU chip and DSP chip. */ ACCELERATOR }; /** * @brief Enumerates the AI chip states. * * @since 3.2 * @version 2.1 */ enum DeviceStatus: int { /** Available. */ AVAILABLE, /** Busy. The chip in this state may not respond to computing tasks in a timely manner. */ BUSY, /** Offline. The chip in this state cannot respond to computing tasks. */ OFFLINE, /** Unknown state. */ UNKNOWN }; /** * @brief Enumerates the performance modes for a chip to perform AI computing. * * @since 3.2 * @version 2.1 */ enum PerformanceMode: int { /** No performance mode is specified. The specific running mode is defined by the chip. */ PERFORMANCE_NONE, /** Low-performance mode, which provides slow AI computing but low power consumption. */ PERFORMANCE_LOW, /** Medium-performance mode, which provides moderate computing speed and power consumption. */ PERFORMANCE_MEDIUM, /** High-performance mode, which provides fast AI computing but high power consumption. */ PERFORMANCE_HIGH, /** Extreme-performance mode, which provides the fastest AI computing but highest power consumption. */ PERFORMANCE_EXTREME }; /** * @brief Enumerates the AI computing task priorities. * * @since 3.2 * @version 2.1 */ enum Priority: int { /** No task priority is specified. The specific execution policy is defined by the chip. */ PRIORITY_NONE, /** Low priority. A task with a higher priority will be executed first. */ PRIORITY_LOW, /** Medium priority. A task with a higher priority will be executed first. */ PRIORITY_MEDIUM, /** High priority. High-priority tasks are executed first. */ PRIORITY_HIGH }; /** * @brief Defines the parameters required for model building. * * @since 3.2 * @version 2.1 */ struct ModelConfig { /** Whether to run a Float32 model in Float16 precision. */ boolean enableFloat16; /** Performance mode of the computing task. 
For details, see {@link PerformanceMode}. */ enum PerformanceMode mode; /** Priority of the computing task. For details, see {@link Priority}. */ enum Priority priority; /** Custom attributes of the underlying hardware. They are stored in the format of name:binary value * and parsed by the HDI service. */ Map extensions; }; /** * @brief Enumerates the operator data formats. This parameter must be used together with {@link Tensor}. * * @since 3.2 * @version 2.1 */ enum Format : byte { /** Initial value of the format (unspecified). */ FORMAT_NONE = -1, /** NCHW, which indicates the number of data samples, image channels, image height, and image width in sequence. */ FORMAT_NCHW = 0, /** NHWC, which indicates the number of data samples, image height, image width, and image channels in sequence. */ FORMAT_NHWC = 1 }; /** * @brief Defines the quantization parameter structure. * * In the following formulas, q is the quantized value, r is the real value, \f$ r_{max} \f$ * is the maximum value of the data to be quantized, \f$ r_{min} \f$ is the minimum value of the data * to be quantized, and round(x) means to round off x to an integer. \f[ \text{clamp}(x,min,max) = \begin{cases} \text{max} & \text{ if } x > \text{ max } \\ \text{min} & \text{ if } x < \text{ min } \\ x & \text{ otherwise } \\ \end{cases} \f] * Formula for transforming a real number from a floating-point representation to a fixed-point representation: \f[ q = \text{clamp}(round(\frac{r}{scale}+zeroPoint), q_{min}, q_{max}) \f] * Formula for transforming a real number from a fixed-point representation to a floating-point representation: \f[ r = (q-zeroPoint)*scale \f] * scale is calculated by using the following formula: \f[ scale = \frac{r_{max}-r_{min}}{q_{max}-q_{min}} \f] * zeroPoint is calculated by using the following formula: \f[ zeroPoint = round(q_{min}-\frac{r_{min}}{scale}) \f] * \f$ q_{min} \f$ and \f$ q_{max} \f$ are calculated by using the following formulas: \f[ q_{min} = -(1<<(numBits-1)) \f] \f[ q_{max} = (1<<(numBits-1))-1 \f] * For example, with numBits = 8, \f$ r_{min} = 0 \f$, and \f$ r_{max} = 2.55 \f$: \f$ q_{min} = -128 \f$, \f$ q_{max} = 127 \f$, scale = 0.01, and zeroPoint = -128, so r = 1.0 quantizes to q = clamp(round(1.0/0.01 + (-128)), -128, 127) = -28 and dequantizes back to (-28 + 128) * 0.01 = 1.0. * When \f$ r_{min} \f$ and \f$ r_{max} \f$ are 0, scale and zeroPoint must be 0. * * @since 3.2 * @version 2.1 */ struct QuantParam { /** Number of quantized bits. */ int numBits; /** Zero point of the quantization. */ int zeroPoint; /** Scale (step size) of the quantizer. */ double scale; }; /** * @brief Enumerates the tensor data types. This parameter must be used together with {@link Tensor}. * * @since 3.2 * @version 2.1 */ enum DataType : byte { /** Unknown type. */ DATA_TYPE_UNKNOWN = 0, /** Boolean. */ DATA_TYPE_BOOL = 30, /** INT8. */ DATA_TYPE_INT8 = 32, /** INT16. */ DATA_TYPE_INT16 = 33, /** INT32. */ DATA_TYPE_INT32 = 34, /** INT64. */ DATA_TYPE_INT64 = 35, /** UINT8. */ DATA_TYPE_UINT8 = 37, /** UINT16. */ DATA_TYPE_UINT16 = 38, /** UINT32. */ DATA_TYPE_UINT32 = 39, /** UINT64. */ DATA_TYPE_UINT64 = 40, /** FLOAT16. */ DATA_TYPE_FLOAT16 = 42, /** FLOAT32. */ DATA_TYPE_FLOAT32 = 43, /** FLOAT64. */ DATA_TYPE_FLOAT64 = 44, }; /** * @brief Defines the input and output tensors of an AI model. * * @since 3.2 * @version 2.1 */ struct IOTensor { /** Tensor name. */ String name; /** Data type of the tensor. For details, see {@link DataType}. */ enum DataType dataType; /** Dimensions of the tensor. */ int[] dimensions; /** Format of the tensor. For details, see {@link Format}. */ enum Format format; /** Tensor data, which is stored in the shared memory. For details about the shared memory, * see {@link SharedBuffer}. */ struct SharedBuffer data; }; /** * @brief Enumerates the quantization types. This parameter must be used together with {@link Node}. * * @since 3.2 * @version 2.1 */ enum QuantType: byte { /** No quantization.
*/ QUANT_TYPE_NONE, /** INT8 quantization. */ QUANT_TYPE_ALL, }; /** * @brief Enumerates the operator types. * * @since 3.2 * @version 2.1 */ enum NodeType : unsigned int { /** None. */ NODE_TYPE_NONE = 0, /** Abs operator. */ NODE_TYPE_ABS = 1, /** Activation function. */ NODE_TYPE_ACTIVATION = 2, /** ADD operator. */ NODE_TYPE_ADD_FUSION = 5, /** ALL operator. */ NODE_TYPE_ALL = 9, /** ArgMax operator. */ NODE_TYPE_ARGMAX_FUSION = 11, /** Assert operator. */ NODE_TYPE_ASSERT = 13, /** AVGPOOL operator. */ NODE_TYPE_AVGPOOL_FUSION = 17, /** BatchToSpaceND operator. */ NODE_TYPE_BATCH_TO_SPACE_ND = 22, /** BiasAdd operator. */ NODE_TYPE_BIAS_ADD = 23, /** BroadCastTo operator. */ NODE_TYPE_BROADCAST_TO = 27, /** Cast operator. */ NODE_TYPE_CAST = 28, /** Ceil operator. */ NODE_TYPE_CEIL = 29, /** Clip operator. */ NODE_TYPE_CLIP = 30, /** Concat operator. */ NODE_TYPE_CONCAT = 31, /** Conv2D operator, including common convolution, separable convolution, and group convolution. */ NODE_TYPE_CONV2D_FUSION = 35, /** Two-dimensional deconvolution operator. */ NODE_TYPE_CONV2D_TRANSPOSE_FUSION = 36, /** Cos operator. */ NODE_TYPE_COS = 37, /** ConstantOfShape operator. */ NODE_TYPE_CONSTANT_OF_SHAPE = 38, /** Crop operator. */ NODE_TYPE_CROP = 39, /** DepthToSpace operator. */ NODE_TYPE_DEPTH_TO_SPACE = 45, /** DetectionPostProcess operator. */ NODE_TYPE_DETECTION_POST_PROCESS = 46, /** Div operator. */ NODE_TYPE_DIV_FUSION = 47, /** Element-wise operator. */ NODE_TYPE_ELTWISE = 52, /** Equal operator. */ NODE_TYPE_EQUAL = 53, /** Exp operator. */ NODE_TYPE_EXPFUSION = 55, /** ExpandDims operator. */ NODE_TYPE_EXPAND_DIMS = 56, /** Flatten operator. */ NODE_TYPE_FLATTEN = 61, /** Floor operator. */ NODE_TYPE_FLOOR = 63, /** Fill operator. */ NODE_TYPE_FILL = 66, /** FullConnection operator. */ NODE_TYPE_FULL_CONNECTION = 67, /** BatchNorm operator. */ NODE_TYPE_FUSED_BATCH_NORM = 68, /** Gather operator. */ NODE_TYPE_GATHER = 69, /** GatherNd operator. */ NODE_TYPE_GATHER_ND = 70, /** Greater operator. */ NODE_TYPE_GREATER = 71, /** GreaterEqual operator. */ NODE_TYPE_GREATER_EQUAL = 72, /** InstanceNorm operator. */ NODE_TYPE_INSTANCE_NORM = 74, /** LayerNorm operator. */ NODE_TYPE_LAYER_NORM_FUSION = 75, /** LeakyReLU operator. */ NODE_TYPE_LEAKY_RELU = 76, /** Less operator. */ NODE_TYPE_LESS = 77, /** LessEqual operator. */ NODE_TYPE_LESS_EQUAL = 78, /** Log operator. */ NODE_TYPE_LOG = 79, /** LogicalAnd operator. */ NODE_TYPE_LOGICAL_AND = 81, /** LogicalNot operator. */ NODE_TYPE_LOGICAL_NOT = 82, /** LogicalOr operator. */ NODE_TYPE_LOGICAL_OR = 83, /** LRN operator. */ NODE_TYPE_LRN = 85, /** LSTM operator. */ NODE_TYPE_LSTM = 87, /** L2NormalizeFusion operator. */ NODE_TYPE_L2_NORMALIZE_FUSION = 88, /** MatMul operator. */ NODE_TYPE_MATMUL_FUSION = 89, /** Maximum operator. */ NODE_TYPE_MAXIMUM = 90, /** MaxPool operator. */ NODE_TYPE_MAX_POOL_FUSION = 92, /** Minimum operator. */ NODE_TYPE_MINIMUM = 96, /** Mod operator. */ NODE_TYPE_MOD = 98, /** Mul operator. */ NODE_TYPE_MUL_FUSION = 99, /** Neg operator. */ NODE_TYPE_NEG = 101, /** NotEqual operator. */ NODE_TYPE_NOT_EQUAL = 103, /** OneHot operator. */ NODE_TYPE_ONE_HOT = 105, /** Pad operator. */ NODE_TYPE_PAD_FUSION = 107, /** Pow operator. */ NODE_TYPE_POW_FUSION = 110, /** PReLU operator. */ NODE_TYPE_PRELU_FUSION = 112, /** QuantDTypeCast operator. */ NODE_TYPE_QUANT_DTYPE_CAST = 113, /** Rank operator. */ NODE_TYPE_RANK = 114, /** Range operator. */ NODE_TYPE_RANGE = 115, /** Reciprocal operator.
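* It computes the reciprocal \f$ 1 / x_i \f$ of each element of the input tensor.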
*/ NODE_TYPE_RECIPROCAL = 116, /** RealDiv operator. */ NODE_TYPE_REAL_DIV = 117, /** Reduce operator. */ NODE_TYPE_REDUCE_FUSION = 118, /** Reshape operator. */ NODE_TYPE_RESHAPE = 119, /** Resize operator. */ NODE_TYPE_RESIZE = 120, /** Round operator. */ NODE_TYPE_ROUND = 125, /** Rsqrt operator. */ NODE_TYPE_RSQRT = 126, /** Scale operator. */ NODE_TYPE_SCALE_FUSION = 127, /** ScatterNd operator. */ NODE_TYPE_SCATTER_ND = 128, /** Shape operator. */ NODE_TYPE_SHAPE = 130, /** Sin operator. */ NODE_TYPE_SIN = 133, /** Slice operator. */ NODE_TYPE_SLICE_FUSION = 135, /** Softmax operator. */ NODE_TYPE_SOFTMAX = 138, /** SpaceToBatchND operator. */ NODE_TYPE_SPACE_TO_BATCH_ND = 141, /** SpaceToDepth operator. */ NODE_TYPE_SPACE_TO_DEPTH = 142, /** SparseToDense operator. */ NODE_TYPE_SPARSE_TO_DENSE = 144, /** Split operator. */ NODE_TYPE_SPLIT = 145, /** Sqrt operator. */ NODE_TYPE_SQRT = 146, /** Squeeze operator. */ NODE_TYPE_SQUEEZE = 147, /** Square operator. */ NODE_TYPE_SQUARE = 148, /** SquaredDifference operator. */ NODE_TYPE_SQUARED_DIFFERENCE = 149, /** Stack operator. */ NODE_TYPE_STACK = 150, /** StridedSlice operator. */ NODE_TYPE_STRIDED_SLICE = 151, /** Sub operator. */ NODE_TYPE_SUB_FUSION = 152, /** Tile operator. */ NODE_TYPE_TILE_FUSION = 160, /** TopK operator. */ NODE_TYPE_TOPK_FUSION = 161, /** Transpose operator. */ NODE_TYPE_TRANSPOSE = 162, /** Unsqueeze operator. */ NODE_TYPE_UNSQUEEZE = 165, /** Unstack operator. */ NODE_TYPE_UNSTACK = 166, /** Where operator. */ NODE_TYPE_WHERE = 168, /** Select operator. */ NODE_TYPE_SELECT = 170, /** Erf operator. */ NODE_TYPE_ERF = 178, /** LogSoftmax operator. */ NODE_TYPE_LOG_SOFTMAX = 189, /** QuantDTypeCastV2 operator. */ NODE_TYPE_QUANT_DTYPE_CAST_V2 = 999, }; /** * @brief Enumerates the resize methods. It must be used together with the {@link Resize} operator. * * @since 3.2 * @version 2.1 */ enum ResizeMethod : byte { /** Unknown. This is the default value. */ RESIZE_METHOD_UNKNOWN = -1, /** Bilinear interpolation. * For example, calculate the value of an unknown function f at point \f$ (x,y) \f$, where * \f$ x_1 < x < x_2, y_1 < y < y_2 \f$. * The values of the four coordinate points are \f$ Q_{11} = (x_1, y_1) \f$, \f$ Q_{12} = (x_1, y_2) \f$, \f$ Q_{21} = (x_2, y_1) \f$, * and \f$ Q_{22} = (x_2, y_2) \f$. * \f$ f(Q_{11}) \f$, \f$ f(Q_{12}) \f$, \f$ f(Q_{21}) \f$, and \f$ f(Q_{22}) \f$ represent the values of the four points. * The value of \f$ f(x,y) \f$ can be calculated by using the following formula: \f[ f(x,y_1) = \frac{x_2-x}{x_2-x_1}f(Q_{11})+\frac{x-x_1}{x_2-x_1}f(Q_{21}) \f] \f[ f(x,y_2) = \frac{x_2-x}{x_2-x_1}f(Q_{12})+\frac{x-x_1}{x_2-x_1}f(Q_{22}) \f] \f[ f(x,y) = \frac{y_2-y}{y_2-y_1}f(x,y_1)+\frac{y-y_1}{y_2-y_1}f(x,y_2) \f] */ RESIZE_METHOD_LINEAR = 0, /** Nearest neighbor interpolation. * For example, calculate the value of an unknown function f at point \f$ (x,y) \f$, where * \f$ x_1 < x < x_2, y_1 < y < y_2 \f$. * The values of the four coordinate points are \f$ Q_{11} = (x_1, y_1) \f$, \f$ Q_{12} = (x_1, y_2) \f$, * \f$ Q_{21} = (x_2, y_1) \f$, and \f$ Q_{22} = (x_2, y_2) \f$. * Then, the value of the point closest to the point \f$ (x,y) \f$ is the value of \f$ f(x,y) \f$. */ RESIZE_METHOD_NEAREST = 1, /** Bicubic interpolation. * Bicubic interpolation obtains the value of a sampling point by calculating the weighted average of the * values of 16 points around the sampling point. This parameter must be used together with cubicCoeff * and coordinateTransformMode of {@link Resize}.
* When coordinateTransformMode==COORDINATE_TRANSFORM_MODE_HALF_PIXEL, cubicCoeff is -0.5. * In other cases, cubicCoeff is -0.75. The weight function of the interpolation is as follows: \f[ W(x) = \begin{cases} (cubicCoeff+2)|x|^3 - (cubicCoeff+3)|x|^2 +1 , &\text{if } |x| \leq 1; \cr cubicCoeff|x|^3 - 5cubicCoeff|x|^2 + 8cubicCoeff|x| - 4cubicCoeff, &\text{if } 1 \lt |x| \leq 2; \cr 0, &\text{otherwise.} \end{cases} \f] */ RESIZE_METHOD_CUBIC = 2 }; /** * @brief Enumerates the coordinate transformation modes. Only the {@link Resize} operator uses this parameter. * For example, the width coordinates are transformed, where: * new_i is the ith coordinate of the resized tensor along the x axis. * old_i is the corresponding coordinate of the input tensor along the x axis. * newWidth is the length of the resized tensor along the x axis. * oldWidth is the length of the input tensor along the x axis. * old_i can be calculated by using the following formulas: * * COORDINATE_TRANSFORM_MODE_ASYMMETRIC: \f$ old_i = newWidth != 0 ? new_i * oldWidth / newWidth : 0 \f$
* COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS: \f$ old_i = newWidth != 1 ? new_i * (oldWidth - 1) / (newWidth - 1) : 0 \f$
* COORDINATE_TRANSFORM_MODE_HALF_PIXEL: \f$ old_i = newWidth > 1 ? (new_i + 0.5) * oldWidth / newWidth - 0.5 : 0 \f$
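* For example, when an input of width oldWidth = 4 is resized to newWidth = 8, the output coordinate new_i = 3 maps back as follows:
* COORDINATE_TRANSFORM_MODE_ASYMMETRIC: \f$ old_i = 3 * 4 / 8 = 1.5 \f$
* COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS: \f$ old_i = 3 * (4 - 1) / (8 - 1) \approx 1.286 \f$
* COORDINATE_TRANSFORM_MODE_HALF_PIXEL: \f$ old_i = (3 + 0.5) * 4 / 8 - 0.5 = 1.25 \f$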
* * @since 3.2 * @version 2.1 */ enum CoordinateTransformMode : byte { /** Scale based on the ratio without alignment. */ COORDINATE_TRANSFORM_MODE_ASYMMETRIC = 0, /** Align the four corners of the image. */ COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS = 1, /** Align with the pixel center. */ COORDINATE_TRANSFORM_MODE_HALF_PIXEL = 2 }; /** * @brief Enumerates the nearest neighbor interpolation types. It must be used together with the * {@link Resize} operator. * * @since 3.2 * @version 2.1 */ enum NearestMode : byte { /** Round off. */ NEAREST_MODE_NORMAL = 0, /** Round half toward negative infinity. For example, 23.5 is rounded to 23, and −23.5 is rounded to −24. */ NEAREST_MODE_ROUND_HALF_DOWN = 1, /** Round half toward positive infinity. For example, 23.5 is rounded to 24, and −23.5 is rounded to −23. */ NEAREST_MODE_ROUND_HALF_UP = 2, /** Round down to the nearest integer. For example, 23.5 is rounded down to 23, and −23.5 is rounded down * to −24. */ NEAREST_MODE_FLOOR = 3, /** Round up to the nearest integer. For example, 23.5 is rounded up to 24, and −23.5 is rounded up to −23. */ NEAREST_MODE_CEIL = 4 }; /** * @brief Enumerates the activation function types. Activation functions introduce nonlinearity to neural networks. * This allows neural network models to approximate nonlinear functions. * If an operator in the {@link NodeAttrTypes.idl} file has ActivationType parameters, * the corresponding activation function will be called after the operator calculation is complete. * * @since 3.2 * @version 2.1 */ enum ActivationType : byte { /** No activation function. */ ACTIVATION_TYPE_NO_ACTIVATION = 0, /** * ReLU activation function. * ReLU calculates \f$ \max(x_i, 0) \f$ element by element. It outputs the value directly if it is positive; * otherwise, it outputs 0. \f[ \text{ReLU}(x_i) = (x_i)^+ = \max(x_i, 0), \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_RELU = 1, /** * Sigmoid activation function. * The sigmoid function is applied element by element. * It is defined as follows: \f[ \text{Sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)} \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_SIGMOID = 2, /** * ReLU6 activation function. * ReLU6 is similar to ReLU. The difference is that ReLU6 has an upper limit of 6: if the input is greater than 6, * the output is limited to 6. * The ReLU6 function is defined as follows: \f[ \text{ReLU6}(x_i) = \min(\max(0, x_i), 6) \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_RELU6 = 3, /** * Exponential Linear Unit (ELU) activation function. * ELU is calculated for each input element. * The ELU function is defined as follows: \f[ ELU(x_{i}) = \begin{cases} x_i, &\text{if } x_i \geq 0; \cr \alpha * (\exp(x_i) - 1), &\text{otherwise.} \end{cases} \f] * \f$ x_i \f$ indicates the input element, and \f$ \alpha \f$ indicates the alpha parameter, * which is set by {@link Activation}. */ ACTIVATION_TYPE_ELU = 4, /** * LeakyReLU activation function. * The LeakyReLU function is defined as follows: \f[ \text{LeakyReLU}(x_i) = \begin{cases} x_i, &\text{if } x_i \geq 0; \cr {\alpha} * x_i, &\text{otherwise.} \end{cases} \f] * \f$ x_i \f$ indicates the input element, and \f$ \alpha \f$ indicates the alpha parameter, * which is set by {@link Activation}. */ ACTIVATION_TYPE_LEAKY_RELU = 5, /** * Activation function for calculating the absolute value. * The function is defined as follows: \f[ \text{abs}(x_i) = |x_i| \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_ABS = 6, /** * ReLU1 activation function.
* The ReLU1 function is defined as follows: \f[ \text{ReLU1}(x_i)= \min(\max(0, x_i), 1) \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_RELU1 = 7, /** * SoftSign activation function. * The SoftSign function is defined as follows: \f[ \text{SoftSign}(x_i) = \frac{x_i}{1 + |x_i|} \f] * \f$ x_i \f$ is the input. */ ACTIVATION_TYPE_SOFTSIGN = 8, /** * Softplus activation function. * Softplus is a smooth approximation to ReLU. It can be used to constrain the output to always be positive. * The Softplus function is defined as follows: \f[ \text{Softplus}(x_i) = \log(1 + \exp(x_i)) \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_SOFTPLUS = 9, /** * Tanh activation function. * The Tanh function is defined as follows: \f[ \tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1} \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_TANH = 10, /** * Scaled Exponential Linear Unit (SELU) activation function. * The SELU function is defined as follows: \f[ SELU(x_{i}) = scale * \begin{cases} x_{i}, &\text{if } x_{i} \geq 0; \cr \alpha * (\exp(x_i) - 1), &\text{otherwise.} \end{cases} \f] * \f$ x_i \f$ is the input element, and \f$ \alpha \f$ and \f$ scale \f$ are predefined * constants (\f$ \alpha = 1.67326324 \f$, \f$ scale = 1.05070098 \f$). */ ACTIVATION_TYPE_SELU = 11, /** * Hard Swish activation function. * \f[ \text{Hardswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6} \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_HSWISH = 12, /** * Hard sigmoid activation function. * The hard sigmoid function is defined as follows: \f[ \text{Hardsigmoid}(x_{i}) = \max(0, \min(1, \frac{x_{i} + 3}{6})) \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_HSIGMOID = 13, /** * ThresholdedReLU activation function. * ThresholdedReLU is similar to ReLU. The ThresholdedReLU function is defined as follows: \f[ \text{ThresholdedReLU}(x_i) = \min(\max(0, x_i), t) \f] * \f$ x_i \f$ is the input element, and \f$ t \f$ is the maximum value. */ ACTIVATION_TYPE_THRESHOLDRELU = 14, /** * Linear activation function. * The Linear function is defined as follows: \f[ \text{Linear}(x_i) = x_i \f] * \f$ x_i \f$ is the input element. */ ACTIVATION_TYPE_LINEAR = 15, /** * HardTanh activation function. * The HardTanh function is defined as follows: \f[ \text{HardTanh}(x_i) = \begin{cases} \text{max\_val} & \text{ if } x_i > \text{ max\_val } \\ \text{min\_val} & \text{ if } x_i < \text{ min\_val } \\ x_i & \text{ otherwise } \\ \end{cases} \f] * \f$ x_i \f$ is the input, \f$ max\_val \f$ is the maximum value, and \f$ min\_val \f$ * is the minimum value. The two parameters are set by {@link Activation}. */ ACTIVATION_TYPE_HARD_TANH = 16, /** * Sign activation function. * The Sign function is defined as follows: \f[ \text{Sign}(x_i) = \begin{cases} -1, &\text{if } x_i < 0 \cr 0, &\text{if } x_i = 0 \cr 1, &\text{if } x_i > 0 \end{cases} \f] * \f$ x_i \f$ is the input. */ ACTIVATION_TYPE_SIGN = 17, /** * Swish activation function. * The Swish function is defined as follows: \f[ \text{Swish}(x_i) = x_i * \text{Sigmoid}(x_i) \f] * \f$ x_i \f$ is the input. */ ACTIVATION_TYPE_SWISH = 18, /** * Gaussian Error Linear Unit (GELU) activation function. * The GELU function is defined as follows: \f[ GELU(x_i) = x_i*P(X < x_i) \f] * \f$ x_i \f$ is the input element, and \f$ P \f$ is the cumulative distribution function of * the standard Gaussian distribution. * You need to use the approximate parameter of {@link Activation} to specify whether to use approximation.
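* When approximation is enabled, the widely used tanh-based approximation of GELU is \f[ GELU(x_i) \approx 0.5 * x_i * (1 + \tanh(\sqrt{2/\pi} * (x_i + 0.044715 * x_i^{3}))) \f]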
*/ ACTIVATION_TYPE_GELU = 19, /** Unknown. */ ACTIVATION_TYPE_UNKNOWN = 20 }; /** * @brief Enumerates the dimension reduction modes. It must be used together with the {@link ReduceFusion} operator. * * @since 3.2 * @version 2.1 */ enum ReduceMode : byte { /** Use the average value of all elements of a dimension to replace other elements of the dimension. */ REDUCE_MODE_MEAN = 0, /** Use the maximum value of all elements of a dimension to replace other elements of the dimension. */ REDUCE_MODE_MAX = 1, /** Use the minimum value of all elements of a dimension to replace other elements of the dimension. */ REDUCE_MODE_MIN = 2, /** Use the product of all elements of a dimension to replace other elements of the dimension. */ REDUCE_MODE_PROD = 3, /** Use the sum of all elements of a dimension to replace other elements of the dimension. */ REDUCE_MODE_SUM = 4, /** Use the sum of squares of all elements of a dimension to replace other elements of the dimension. */ REDUCE_MODE_SUM_SQUARE = 5, /** Use the sum of absolute values of all elements of a dimension to replace other elements of the dimension. */ REDUCE_MODE_ASUM = 6, /** Use the logical AND of all elements of a dimension to replace other elements of the dimension. */ REDUCE_MODE_ALL = 7, /** Use the L2-Normalization of all elements of a dimension to replace other elements of the dimension. */ REDUCE_MODE_L2 = 8 }; /** * @brief Enumerates the element-wise calculation types. It must be used together with the * {@link Eltwise} operator. * * @since 3.2 * @version 2.1 */ enum EltwiseMode : byte { /** Product of the elements of two tensors. */ ELTWISE_MODE_PROD = 0, /** Sum of the elements of two tensors. */ ELTWISE_MODE_SUM = 1, /** Maximum value of the elements of two tensors. */ ELTWISE_MODE_MAXIMUM = 2, /** Unknown type. */ ELTWISE_MODE_UNKNOWN = 3 }; /** * @brief Enumerates the padding types. It must be used together with {@link AvgPoolFusion}, * {@link Conv2DFusion}, and {@link MaxPoolFusion}. * * @since 3.2 * @version 2.1 */ enum PadMode : byte { /** * Adds 0s in the input height and width directions. * If this mode is used, the padding parameter of the operator must be greater than or equal to 0. */ PAD_MODE_PAD = 0, /** * The output height and width are obtained by dividing the input height and width by a stride and rounding * the quotient up to an integer. * If this mode is used, the padding parameter of the operator must be 0. */ PAD_MODE_SAME = 1, /** * Return the output of a valid calculation without padding. Pixels that do not meet the calculation requirements * will be discarded. * If this mode is used, the padding parameter of the operator must be 0. */ PAD_MODE_VALID = 2, }; /** * @brief Enumerates the algorithms for rounding off decimals. It must be used together with the * {@link AvgPoolFusion} operator. * * @since 3.2 * @version 2.1 */ enum RoundMode : byte { /** Round down to the nearest integer. For example, 23.5 is rounded down to 23, and −23.5 is rounded down to −24. */ ROUND_MODE_FLOOR = 0, /** Round up to the nearest integer. For example, 23.5 is rounded up to 24, and −23.5 is rounded up to −23. */ ROUND_MODE_CEIL = 1 }; /** * @brief Enumerates the padding modes. It must be used together with the {@link PadFusion} operator. * * When x is \f$[[1,2,3],[4,5,6],[7,8,9]]\f$ and paddings is \f$[[2,2], [2,2]] \f$, * the effect is as follows:
* If paddingMode==PADDING_MODE_CONSTANT and constantValue = 0, the output is as follows: * \f$[[0. 0. 0. 0. 0. 0. 0.],\\ [0. 0. 0. 0. 0. 0. 0.],\\ [0. 0. 1. 2. 3. 0. 0.],\\ [0. 0. 4. 5. 6. 0. 0.],\\ [0. 0. 7. 8. 9. 0. 0.],\\ [0. 0. 0. 0. 0. 0. 0.],\\ [0. 0. 0. 0. 0. 0. 0.]]\\ \f$ * * If paddingMode==PADDING_MODE_REFLECT, the output is as follows: * \f$[[9. 8. 7. 8. 9. 8. 7.],\\ [6. 5. 4. 5. 6. 5. 4.],\\ [3. 2. 1. 2. 3. 2. 1.],\\ [6. 5. 4. 5. 6. 5. 4.],\\ [9. 8. 7. 8. 9. 8. 7.],\\ [6. 5. 4. 5. 6. 5. 4.],\\ [3. 2. 1. 2. 3. 2. 1.]]\\ \f$ * * If paddingMode==PADDING_MODE_SYMMETRIC, the output is as follows: * \f$[[5. 4. 4. 5. 6. 6. 5.],\\ [2. 1. 1. 2. 3. 3. 2.],\\ [2. 1. 1. 2. 3. 3. 2.],\\ [5. 4. 4. 5. 6. 6. 5.],\\ [8. 7. 7. 8. 9. 9. 8.],\\ [8. 7. 7. 8. 9. 9. 8.],\\ [5. 4. 4. 5. 6. 6. 5.]]\\ \f$ * * @since 3.2 * @version 2.1 */ enum PaddingMode : byte { /** Constant (0 by default) padding. */ PADDING_MODE_CONSTANT = 0, /** Reflection padding, which mirrors the input across its edges without repeating the edge values. */ PADDING_MODE_REFLECT = 1, /** Symmetric padding, which is similar to {@link PADDING_MODE_REFLECT}, except that the mirrored copy includes the edge values. */ PADDING_MODE_SYMMETRIC = 2, /** Reserved. */ PADDING_MODE_RESERVED = 3 }; /** * @brief Dedicated error codes defined by NNRt. They are used as the return values of HDIs. * * @since 4.0 * @version 2.1 */ enum NNRT_ReturnCode : unsigned int { /** Success. */ NNRT_SUCCESS = 0, /** Failed. */ NNRT_FAILED = 1, /** Null pointer. */ NNRT_NULL_PTR = 2, /** Invalid parameter. */ NNRT_INVALID_PARAMETER = 3, /** Memory error. */ NNRT_MEMORY_ERROR = 4, /** Insufficient memory. */ NNRT_OUT_OF_MEMORY = 5, /** Forbidden operation. */ NNRT_OPERATION_FORBIDDEN = 6, /** Invalid file. */ NNRT_INVALID_FILE = 7, /** Invalid path. */ NNRT_INVALID_PATH = 8, /** Insufficient buffer. */ NNRT_INSUFFICIENT_BUFFER = 9, /** No change. */ NNRT_NO_CHANGE = 10, /** Not supported. */ NNRT_NOT_SUPPORT = 11, /** Service error. */ NNRT_SERVICE_ERROR = 12, /** Device error. */ NNRT_DEVICE_ERROR = 13, /** Device busy. */ NNRT_DEVICE_BUSY = 14, /** Operation canceled. */ NNRT_CANCELLED = 15, /** Access denied. */ NNRT_PERMISSION_DENIED = 16, /** Timeout. */ NNRT_TIME_OUT = 17, /** Invalid tensor. */ NNRT_INVALID_TENSOR = 18, /** Invalid node. */ NNRT_INVALID_NODE = 19, /** Invalid input. */ NNRT_INVALID_INPUT = 20, /** Invalid output. */ NNRT_INVALID_OUTPUT = 21, /** Invalid data type. */ NNRT_INVALID_DATATYPE = 22, /** Invalid data layout. */ NNRT_INVALID_FORMAT = 23, /** Invalid tensor name. */ NNRT_INVALID_TENSOR_NAME = 24, /** Invalid shape. */ NNRT_INVALID_SHAPE = 25, /** Dimension range exceeded. */ NNRT_OUT_OF_DIMENTION_RANGES = 26, /** Invalid buffer. */ NNRT_INVALID_BUFFER = 27, /** Invalid buffer size. */ NNRT_INVALID_BUFFER_SIZE = 28, /** Invalid performance mode. */ NNRT_INVALID_PERFORMANCE_MODE = 29, /** Invalid priority. */ NNRT_INVALID_PRIORITY = 30, /** Invalid model. */ NNRT_INVALID_MODEL = 31, /** Invalid model cache. */ NNRT_INVALID_MODEL_CACHE = 32, /** Operator not supported. */ NNRT_UNSUPPORTED_OP = 33 }; /** @} */