1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "batchnorm_builder.h"
17 
18 #include "mindir.h"
19 
20 #include "ops_registry.h"
21 
22 namespace OHOS {
23 namespace NeuralNetworkRuntime {
24 namespace Ops {
// BatchNorm expects exactly 5 input tensors (validated by CheckIOIndex in Build).
static const int INPUT_NUM = 5;
// BatchNorm produces exactly 1 output tensor.
static const int OUTPUT_NUM = 1;
// At most one operator parameter (epsilon) is accepted.
static const int PARAM_MAX_NUM = 1;
// Epsilon must be a scalar, i.e. a tensor with a single element.
static const int SCALAR_LENGTH = 1;
// Operator name assigned to m_name once Build succeeds.
const std::string OP_NAME = "BatchNorm";
30 
BatchNormBuilder()31 BatchNormBuilder::BatchNormBuilder() {}
32 
~BatchNormBuilder()33 BatchNormBuilder::~BatchNormBuilder() {}
34 
SetEpsilon(const std::shared_ptr<NNTensor> & tensor)35 OH_NN_ReturnCode BatchNormBuilder::SetEpsilon(const std::shared_ptr<NNTensor>& tensor)
36 {
37     tensor->IdentifyOpParameter();
38     if (tensor->GetDataType() != OH_NN_FLOAT32) {
39         LOGE("[BatchNorm] SetEpsilon failed, the Epsilon should be type OH_NN_FLOAT32.");
40         return OH_NN_INVALID_PARAMETER;
41     }
42 
43     if (tensor->GetElementCount() != SCALAR_LENGTH) {
44         LOGE("[BatchNorm] SetEpsilon failed, the Epsilon shoule be a scalar");
45         return OH_NN_INVALID_PARAMETER;
46     }
47 
48     void* buffer = tensor->GetBuffer();
49     if (buffer == nullptr) {
50         LOGE("[BatchNorm] SetEpsilon failed, the epsilon passed a empty buffer.");
51         return OH_NN_INVALID_PARAMETER;
52     }
53 
54     m_epsilon = *static_cast<float*>(buffer);
55     return OH_NN_SUCCESS;
56 }
57 
Build(const std::vector<uint32_t> & paramsIndex,const std::vector<uint32_t> & inputsIndex,const std::vector<uint32_t> & outputsIndex,const std::vector<std::shared_ptr<NNTensor>> & allTensors)58 OH_NN_ReturnCode BatchNormBuilder::Build(const std::vector<uint32_t>& paramsIndex,
59                                          const std::vector<uint32_t>& inputsIndex,
60                                          const std::vector<uint32_t>& outputsIndex,
61                                          const std::vector<std::shared_ptr<NNTensor>>& allTensors)
62 {
63     if (m_isBuild) {
64         LOGE("[BatchNorm] Build failed, batchNorm operation has been build, cannot build again.");
65         return OH_NN_OPERATION_FORBIDDEN;
66     }
67 
68     OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
69     if (returnCode != OH_NN_SUCCESS) {
70         LOGE("[BatchNorm] Build failed, passed invalid input or output index.");
71         return returnCode;
72     }
73 
74     m_inputsIndex = inputsIndex;
75     m_outputsIndex = outputsIndex;
76 
77     returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM);
78     if (returnCode != OH_NN_SUCCESS) {
79         LOGE("[BatchNorm] Build failed, passed invalid param index.");
80         return returnCode;
81     }
82 
83     for (int i : paramsIndex) {
84         std::shared_ptr<NNTensor> tensor = allTensors[i];
85         if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) {
86             returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor);
87         } else {
88             LOGE("[BatchNorm] Build failed, param invalid, type=%d", tensor->GetType());
89             return OH_NN_INVALID_PARAMETER;
90         }
91 
92         if (returnCode != OH_NN_SUCCESS) {
93             LOGE("[BatchNorm] BatchNorm Build failed,, Passed invalid param.");
94             return returnCode;
95         }
96     }
97 
98     // The quantization type of the first output determinies that of the operator.
99     SetQuantType(outputsIndex, allTensors);
100 
101     m_isBuild = true;
102     m_name = OP_NAME;
103     return OH_NN_SUCCESS;
104 }
105 
GetPrimitive()106 LiteGraphPrimitvePtr BatchNormBuilder::GetPrimitive()
107 {
108     if (!m_isBuild) {
109         LOGE("[BatchNorm] GetPrimitive failed, cannot get primitive before call build.");
110         return {nullptr, DestroyLiteGraphPrimitive};
111     }
112 
113     void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(m_epsilon);
114     LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
115     return graphPrimitivePtr;
116 }
117 
// Registers this builder in the ops registry under the BatchNorm op type.
REGISTER_OPS(BatchNormBuilder, OH_NN_OPS_BATCH_NORM);
119 } // namespace Ops
120 } // namespace NeuralNetworkRuntime
121 } // namespace OHOS