/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This test only tests internal APIs, and has dependencies on internal header
// files, including NN API HIDL definitions.
// It is not part of CTS.

#include <android/sharedmem.h>
#include <gtest/gtest.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <cstring>
#include <fstream>
#include <string>

#include "Manager.h"
#include "Memory.h"
#include "TestMemory.h"
#include "TestNeuralNetworksWrapper.h"

using WrapperCompilation = ::android::nn::test_wrapper::Compilation;
using WrapperExecution = ::android::nn::test_wrapper::Execution;
using WrapperMemory = ::android::nn::test_wrapper::Memory;
using WrapperModel = ::android::nn::test_wrapper::Model;
using WrapperOperandType = ::android::nn::test_wrapper::OperandType;
using WrapperResult = ::android::nn::test_wrapper::Result;
using WrapperType = ::android::nn::test_wrapper::Type;

namespace {

// Tests to ensure that various kinds of memory leaks do not occur.
//
// The fixture checks that no anonymous shared memory regions are leaked by
// comparing the count of /dev/ashmem mappings in SetUp and TearDown. This could
// break if the test or the framework starts lazily instantiating something that
// creates a mapping; if that happens, the way this test works needs to be
// reinvestigated. The filename /dev/ashmem is a documented part of the Android
// kernel interface (see
// https://source.android.com/devices/architecture/kernel/reqs-interfaces).
//
// (We can also get very unlucky and mask a memory leak by an unrelated unmapping
// happening somewhere else. This seems unlikely enough not to deal with.)
class MemoryLeakTest : public ::testing::Test {
   protected:
    void SetUp() override;
    void TearDown() override;

   private:
    size_t GetAshmemMappingsCount();

    size_t mStartingMapCount = 0;
    bool mIsCpuOnly;
};

void MemoryLeakTest::SetUp() {
    mIsCpuOnly = android::nn::DeviceManager::get()->getUseCpuOnly();
    mStartingMapCount = GetAshmemMappingsCount();
}

void MemoryLeakTest::TearDown() {
    android::nn::DeviceManager::get()->setUseCpuOnly(mIsCpuOnly);
    const size_t endingMapCount = GetAshmemMappingsCount();
    ASSERT_EQ(mStartingMapCount, endingMapCount);
}

size_t MemoryLeakTest::GetAshmemMappingsCount() {
    std::ifstream mappingsStream("/proc/self/maps");
    if (!mappingsStream.good()) {
        // errno is set by std::ifstream on Linux
        ADD_FAILURE() << "Failed to open /proc/self/maps: " << std::strerror(errno);
        return 0;
    }
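    // Every mapping of an ashmem region appears in /proc/self/maps as a line whose
    // pathname field contains "/dev/ashmem", so counting those lines counts the
    // process's live ashmem mappings.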
    std::string line;
    size_t mapCount = 0;
    while (std::getline(mappingsStream, line)) {
        if (line.find("/dev/ashmem") != std::string::npos) {
            ++mapCount;
        }
    }
    return mapCount;
}

// As well as serving as a functional test for ASharedMemory, this also serves
// as a regression test for http://b/69685100 "RunTimePoolInfo leaks shared
// memory regions".
//
// TODO: test non-zero offset.
TEST_F(MemoryLeakTest, TestASharedMemory) {
    // Layout of where to place matrix2 and matrix3 in the memory we'll allocate.
    // We have gaps to test that we don't assume contiguity.
    constexpr uint32_t offsetForMatrix2 = 20;
    constexpr uint32_t offsetForMatrix3 = offsetForMatrix2 + sizeof(matrix2) + 30;
    constexpr uint32_t weightsSize = offsetForMatrix3 + sizeof(matrix3) + 60;

    int weightsFd = ASharedMemory_create("weights", weightsSize);
    ASSERT_GT(weightsFd, -1);
    uint8_t* weightsData =
            (uint8_t*)mmap(nullptr, weightsSize, PROT_READ | PROT_WRITE, MAP_SHARED, weightsFd, 0);
    ASSERT_NE(weightsData, reinterpret_cast<uint8_t*>(MAP_FAILED));
    memcpy(weightsData + offsetForMatrix2, matrix2, sizeof(matrix2));
    memcpy(weightsData + offsetForMatrix3, matrix3, sizeof(matrix3));
    WrapperMemory weights(weightsSize, PROT_READ | PROT_WRITE, weightsFd, 0);
    ASSERT_TRUE(weights.isValid());
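
    // The model chains two ANEURALNETWORKS_ADD operations: b = a + c, then d = b + e,
    // with f as the (no-op) fused activation. Constants a and e are sourced from the
    // shared "weights" memory; c is the runtime input and d is the output.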
    WrapperModel model;
    WrapperOperandType matrixType(WrapperType::TENSOR_FLOAT32, {3, 4});
    WrapperOperandType scalarType(WrapperType::INT32, {});
    int32_t activation(0);
    auto a = model.addOperand(&matrixType);
    auto b = model.addOperand(&matrixType);
    auto c = model.addOperand(&matrixType);
    auto d = model.addOperand(&matrixType);
    auto e = model.addOperand(&matrixType);
    auto f = model.addOperand(&scalarType);

    model.setOperandValueFromMemory(e, &weights, offsetForMatrix2, sizeof(Matrix3x4));
    model.setOperandValueFromMemory(a, &weights, offsetForMatrix3, sizeof(Matrix3x4));
    model.setOperandValue(f, &activation, sizeof(activation));
    model.addOperation(ANEURALNETWORKS_ADD, {a, c, f}, {b});
    model.addOperation(ANEURALNETWORKS_ADD, {b, e, f}, {d});
    model.identifyInputsAndOutputs({c}, {d});
    ASSERT_TRUE(model.isValid());
    model.finish();

    // Test the two-node model.
    constexpr uint32_t offsetForMatrix1 = 20;
    constexpr size_t inputSize = offsetForMatrix1 + sizeof(Matrix3x4);
    int inputFd = ASharedMemory_create("input", inputSize);
    ASSERT_GT(inputFd, -1);
    uint8_t* inputData =
            (uint8_t*)mmap(nullptr, inputSize, PROT_READ | PROT_WRITE, MAP_SHARED, inputFd, 0);
    ASSERT_NE(inputData, reinterpret_cast<uint8_t*>(MAP_FAILED));
    memcpy(inputData + offsetForMatrix1, matrix1, sizeof(Matrix3x4));
    WrapperMemory input(inputSize, PROT_READ, inputFd, 0);
    ASSERT_TRUE(input.isValid());

    constexpr uint32_t offsetForActual = 32;
    constexpr size_t outputSize = offsetForActual + sizeof(Matrix3x4);
    int outputFd = ASharedMemory_create("output", outputSize);
    ASSERT_GT(outputFd, -1);
    uint8_t* outputData =
            (uint8_t*)mmap(nullptr, outputSize, PROT_READ | PROT_WRITE, MAP_SHARED, outputFd, 0);
    ASSERT_NE(outputData, reinterpret_cast<uint8_t*>(MAP_FAILED));
    memset(outputData, 0, outputSize);
    WrapperMemory actual(outputSize, PROT_READ | PROT_WRITE, outputFd, 0);
    ASSERT_TRUE(actual.isValid());

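    // Compile and execute the model, supplying the input and receiving the output
    // through the shared memory regions, then verify the result in place via the
    // mmap'd output pointer.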
    WrapperCompilation compilation2(&model);
    ASSERT_EQ(compilation2.finish(), WrapperResult::NO_ERROR);

    WrapperExecution execution2(&compilation2);
    ASSERT_EQ(execution2.setInputFromMemory(0, &input, offsetForMatrix1, sizeof(Matrix3x4)),
              WrapperResult::NO_ERROR);
    ASSERT_EQ(execution2.setOutputFromMemory(0, &actual, offsetForActual, sizeof(Matrix3x4)),
              WrapperResult::NO_ERROR);
    ASSERT_EQ(execution2.compute(), WrapperResult::NO_ERROR);
    ASSERT_EQ(
            CompareMatrices(expected3, *reinterpret_cast<Matrix3x4*>(outputData + offsetForActual)),
            0);

    munmap(weightsData, weightsSize);
    munmap(inputData, inputSize);
    munmap(outputData, outputSize);
    close(weightsFd);
    close(inputFd);
    close(outputFd);
}

#ifndef NNTEST_ONLY_PUBLIC_API
// Regression test for http://b/73663843, conv_2d trying to allocate too much memory.
TEST_F(MemoryLeakTest, convTooLarge) {
    android::nn::DeviceManager::get()->setUseCpuOnly(true);
    WrapperModel model;

    // This kernel/input size will make convQuant8 allocate 12 * 13 * 13 * 128 * 92 * 92,
    // which is just outside of signed int range (0x82F56000); the operation will fail
    // due to CPU implementation limitations.
    WrapperOperandType type3(WrapperType::INT32, {});
    WrapperOperandType type2(WrapperType::TENSOR_INT32, {128}, 0.25, 0);
    WrapperOperandType type0(WrapperType::TENSOR_QUANT8_ASYMM, {12, 104, 104, 128}, 0.5, 0);
    WrapperOperandType type4(WrapperType::TENSOR_QUANT8_ASYMM, {12, 92, 92, 128}, 1.0, 0);
    WrapperOperandType type1(WrapperType::TENSOR_QUANT8_ASYMM, {128, 13, 13, 128}, 0.5, 0);
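    // Roles: type0 is the input {12, 104, 104, 128}, type1 the filter {128, 13, 13, 128},
    // type2 the bias {128}, and type4 the output {12, 92, 92, 128}. With zero padding and
    // stride 1, the output spatial size is 104 - 13 + 1 = 92.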

    // Operands
    auto op1 = model.addOperand(&type0);
    auto op2 = model.addOperand(&type1);
    auto op3 = model.addOperand(&type2);
    auto pad0 = model.addOperand(&type3);
    auto act = model.addOperand(&type3);
    auto stride = model.addOperand(&type3);
    auto op4 = model.addOperand(&type4);

    // Operations
    // static: keeps ~2.7 MB of zero-initialized filter data off the stack.
    static uint8_t op2_init[128 * 13 * 13 * 128] = {};
    model.setOperandValue(op2, op2_init, sizeof(op2_init));
    int32_t op3_init[128] = {};
    model.setOperandValue(op3, op3_init, sizeof(op3_init));
    int32_t pad0_init[] = {0};
    model.setOperandValue(pad0, pad0_init, sizeof(pad0_init));
    int32_t act_init[] = {0};
    model.setOperandValue(act, act_init, sizeof(act_init));
    int32_t stride_init[] = {1};
    model.setOperandValue(stride, stride_init, sizeof(stride_init));
    model.addOperation(ANEURALNETWORKS_CONV_2D,
                       {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});

    // Inputs and outputs
    model.identifyInputsAndOutputs({op1}, {op4});
    ASSERT_TRUE(model.isValid());
    model.finish();

    // Compilation
    WrapperCompilation compilation(&model);
    ASSERT_EQ(WrapperResult::NO_ERROR, compilation.finish());
    WrapperExecution execution(&compilation);

    // Set input and output
    static uint8_t input[12 * 104 * 104 * 128] = {};
    ASSERT_EQ(WrapperResult::NO_ERROR, execution.setInput(0, input, sizeof(input)));
    static uint8_t output[12 * 92 * 92 * 128] = {};
    ASSERT_EQ(WrapperResult::NO_ERROR, execution.setOutput(0, output, sizeof(output)));

    // This shouldn't segfault
    WrapperResult r = execution.compute();

    ASSERT_EQ(WrapperResult::OP_FAILED, r);
}
#endif  // NNTEST_ONLY_PUBLIC_API

}  // end namespace