/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <HalInterfaces.h>
#include <SampleDriver.h>
#include <SampleDriverFull.h>
#include <android/hardware/neuralnetworks/1.2/ADevice.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "HalUtils.h"
#include "Manager.h"
#include "Memory.h"
#include "TestNeuralNetworksWrapper.h"
#include "TestUtils.h"

using namespace android::nn;
namespace hardware = android::hardware;
using WrapperResult = test_wrapper::Result;
using Type = test_wrapper::Type;
using android::sp;
using android::nn::isAhwbBlob;

namespace {

// A test buffer that does nothing; both copy directions report that the device
// is unavailable.
class TestBuffer : public V1_3::IBuffer {
   public:
    hardware::Return<V1_3::ErrorStatus> copyTo(const hardware::hidl_memory&) override {
        return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
    }
    hardware::Return<V1_3::ErrorStatus> copyFrom(const hardware::hidl_memory&,
                                                 const hardware::hidl_vec<uint32_t>&) override {
        return V1_3::ErrorStatus::DEVICE_UNAVAILABLE;
    }
};

enum class AllocateReturn { OK, BAD_TOKEN, BAD_IBUFFER, BAD_STATUS, NOT_SUPPORTED };

// Print AllocateReturn enum for better GTEST failure messages.
std::ostream& operator<<(std::ostream& os, AllocateReturn allocateReturn) {
    switch (allocateReturn) {
        case AllocateReturn::OK:
            return os << "OK";
        case AllocateReturn::BAD_IBUFFER:
            return os << "BAD_IBUFFER";
        case AllocateReturn::BAD_TOKEN:
            return os << "BAD_TOKEN";
        case AllocateReturn::BAD_STATUS:
            return os << "BAD_STATUS";
        case AllocateReturn::NOT_SUPPORTED:
            return os << "NOT_SUPPORTED";
    }
    LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(allocateReturn);
    return os;
}

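// A test driver that reports support for exactly the given set of operations
// and responds to allocate() according to the configured AllocateReturn, so
// tests can exercise each way a driver may answer a device memory request.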
class TestDriverLatest : public sample_driver::SampleDriver {
   public:
    TestDriverLatest(const char* name, std::set<V1_3::OperationType> supportedOperations,
                     AllocateReturn allocateReturn)
        : SampleDriver(name),
          kSupportedOperations(std::move(supportedOperations)),
          kAllocateReturn(allocateReturn) {}

    hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
        android::nn::initVLogMask();
        // Faster than CPU.
        const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
        const V1_3::Capabilities capabilities = {
                .relaxedFloat32toFloat16PerformanceScalar = kPerf,
                .relaxedFloat32toFloat16PerformanceTensor = kPerf,
                .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf),
                .ifPerformance = kPerf,
                .whilePerformance = kPerf};
        cb(V1_3::ErrorStatus::NONE, capabilities);
        return hardware::Void();
    }

    hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
                                                      getSupportedOperations_1_3_cb cb) override {
        // The tests will never use a referenced model.
        CHECK(model.referenced.size() == 0);
        std::vector<bool> supported(model.main.operations.size(), false);
        std::transform(model.main.operations.begin(), model.main.operations.end(),
                       supported.begin(), [this](const V1_3::Operation& op) {
                           return kSupportedOperations.count(op.type) > 0;
                       });
        cb(V1_3::ErrorStatus::NONE, supported);
        return hardware::Void();
    }

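    // Responds with one of five canned results so the tests can cover a
    // well-behaved driver (OK), drivers that violate the allocate() contract
    // (BAD_IBUFFER, BAD_TOKEN, BAD_STATUS), and a driver that does not support
    // memory domains (NOT_SUPPORTED).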
    hardware::Return<void> allocate(const V1_3::BufferDesc&,
                                    const hardware::hidl_vec<sp<V1_3::IPreparedModel>>&,
                                    const hardware::hidl_vec<V1_3::BufferRole>&,
                                    const hardware::hidl_vec<V1_3::BufferRole>&,
                                    allocate_cb cb) override {
        switch (kAllocateReturn) {
            case AllocateReturn::OK:
                cb(V1_3::ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++);
                return hardware::Void();
            case AllocateReturn::BAD_IBUFFER:
                cb(V1_3::ErrorStatus::NONE, nullptr, mValidBufferToken++);
                return hardware::Void();
            case AllocateReturn::BAD_TOKEN:
                cb(V1_3::ErrorStatus::NONE, new TestBuffer(), 0);
                return hardware::Void();
            case AllocateReturn::BAD_STATUS:
                cb(V1_3::ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++);
                return hardware::Void();
            case AllocateReturn::NOT_SUPPORTED:
                cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0);
                return hardware::Void();
        }
        LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(kAllocateReturn);
        return hardware::Void();
    }

   private:
    const std::set<V1_3::OperationType> kSupportedOperations;
    const AllocateReturn kAllocateReturn;
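    // A valid buffer token must be positive and unique within the driver, so
    // start counting at 1; the BAD_TOKEN case above deliberately returns the
    // invalid token 0.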
    uint32_t mValidBufferToken = 1;
};

// Create the following model for the tests.
//
//     input0 ---+
//               +--- ADD ---> output0 ---+
//     input1 ---+                        +--- MUL ---> output1 (dynamic shape)
//               +--- SUB ---> temp    ---+
//     input2 ---+
//
void createTestModel(test_wrapper::Model* model) {
    test_wrapper::OperandType tensorTypeFullySpecified(Type::TENSOR_FLOAT32, {1});
    test_wrapper::OperandType tensorTypeDynamicShape(Type::TENSOR_FLOAT32, {0});
    test_wrapper::OperandType actType(Type::INT32, {});
    uint32_t input0 = model->addOperand(&tensorTypeFullySpecified);
    uint32_t input1 = model->addOperand(&tensorTypeFullySpecified);
    uint32_t input2 = model->addOperand(&tensorTypeFullySpecified);
    uint32_t temp = model->addOperand(&tensorTypeFullySpecified);
    uint32_t output0 = model->addOperand(&tensorTypeFullySpecified);
    uint32_t output1 = model->addOperand(&tensorTypeDynamicShape);
    uint32_t act = model->addOperand(&actType);
    int32_t activation = 0;
    model->setOperandValue(act, &activation, sizeof(int32_t));
    model->addOperation(ANEURALNETWORKS_ADD, {input0, input1, act}, {output0});
    model->addOperation(ANEURALNETWORKS_SUB, {input1, input2, act}, {temp});
    model->addOperation(ANEURALNETWORKS_MUL, {output0, temp, act}, {output1});
    model->identifyInputsAndOutputs({input0, input1, input2}, {output0, output1});
    EXPECT_EQ(model->finish(), WrapperResult::NO_ERROR);
}

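// Test fixture that builds the test model and clears the device list, so each
// test starts from a clean slate and sees only the drivers it registers.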
class MemoryDomainTestBase : public ::testing::Test {
   protected:
    void SetUp() override {
        ::testing::Test::SetUp();
        if (DeviceManager::get()->getUseCpuOnly()) {
            GTEST_SKIP();
        }
        createTestModel(&mModel);
        // Clear the device list.
        DeviceManager::get()->forTest_setDevices({});
    }

    void TearDown() override {
        DeviceManager::get()->forTest_reInitializeDeviceList();
        ::testing::Test::TearDown();
    }

    // If "deviceNames" is not empty, the compilation is created with an explicit
    // device list; otherwise, it is created normally.
    test_wrapper::Compilation createCompilation(const std::vector<std::string>& deviceNames) {
        test_wrapper::Compilation compilation;
        if (!deviceNames.empty()) {
            // Map device names to ANeuralNetworksDevice.
            std::map<std::string, ANeuralNetworksDevice*> deviceMap;
            uint32_t numDevices = 0;
            EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
            for (uint32_t i = 0; i < numDevices; i++) {
                ANeuralNetworksDevice* device = nullptr;
                const char* name = nullptr;
                EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
                EXPECT_EQ(ANeuralNetworksDevice_getName(device, &name), ANEURALNETWORKS_NO_ERROR);
                deviceMap.emplace(name, device);
            }
            std::vector<const ANeuralNetworksDevice*> devices(deviceNames.size());
            std::transform(deviceNames.begin(), deviceNames.end(), devices.begin(),
                           [&deviceMap](const std::string& name) { return deviceMap.at(name); });
            WrapperResult result;
            std::tie(result, compilation) =
                    test_wrapper::Compilation::createForDevices(&mModel, devices);
            EXPECT_EQ(result, WrapperResult::NO_ERROR);
        } else {
            compilation = test_wrapper::Compilation(&mModel);
        }
        EXPECT_EQ(compilation.finish(), WrapperResult::NO_ERROR);
        return compilation;
    }

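    // Creates a device memory from the compilation, registering the listed
    // input and output indexes as roles on the memory descriptor. Returns the
    // error code together with the (possibly null) memory.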
    std::pair<int, test_wrapper::Memory> allocateDeviceMemory(
            const test_wrapper::Compilation& compilation, const std::vector<uint32_t>& inputIndexes,
            const std::vector<uint32_t>& outputIndexes) {
        const auto* annCompilation = compilation.getHandle();
        ANeuralNetworksMemoryDesc* desc = nullptr;
        EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
        for (uint32_t index : inputIndexes) {
            EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, annCompilation, index, 1.0f),
                      ANEURALNETWORKS_NO_ERROR);
        }
        for (uint32_t index : outputIndexes) {
            EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, annCompilation, index, 1.0f),
                      ANEURALNETWORKS_NO_ERROR);
        }
        EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);

        ANeuralNetworksMemory* memory;
        int n = ANeuralNetworksMemory_createFromDesc(desc, &memory);
        ANeuralNetworksMemoryDesc_free(desc);
        return {n, test_wrapper::Memory(memory)};
    }

    test_wrapper::Model mModel;
};

// Test the memory domain with the following parameters:
// - If true, use a V1_2 driver; otherwise, use the latest version.
// - If true, compile with an explicit device list; otherwise, compile in the default way.
// - The return behavior of the allocate function.
using MemoryDomainTestParam = std::tuple<bool, bool, AllocateReturn>;

class MemoryDomainTest : public MemoryDomainTestBase,
                         public ::testing::WithParamInterface<MemoryDomainTestParam> {
   protected:
    // If kUseV1_2Driver, allocateReturn must be AllocateReturn::NOT_SUPPORTED.
    void createAndRegisterDriver(const char* name,
                                 std::set<V1_3::OperationType> supportedOperations,
                                 AllocateReturn allocateReturn) {
        if (kUseV1_2Driver) {
            CHECK(allocateReturn == AllocateReturn::NOT_SUPPORTED);
            const sp<TestDriverLatest> testDriver =
                    new TestDriverLatest(name, supportedOperations, AllocateReturn::NOT_SUPPORTED);
            DeviceManager::get()->forTest_registerDevice(
                    makeSharedDevice(name, new V1_2::ADevice(testDriver)));
        } else {
            DeviceManager::get()->forTest_registerDevice(makeSharedDevice(
                    name,
                    new TestDriverLatest(name, std::move(supportedOperations), allocateReturn)));
        }
    }

    // If not kCompileWithExplicitDeviceList, the input argument "deviceNames" is ignored.
    test_wrapper::Compilation createCompilation(const std::vector<std::string>& deviceNames) {
        if (kCompileWithExplicitDeviceList) {
            return MemoryDomainTestBase::createCompilation(deviceNames);
        } else {
            return MemoryDomainTestBase::createCompilation({});
        }
    }

    const bool kUseV1_2Driver = std::get<0>(GetParam());
    const bool kCompileWithExplicitDeviceList = std::get<1>(GetParam());
    const AllocateReturn kAllocateReturn = std::get<2>(GetParam());
};

bool isAshmem(const SharedMemory& memory) {
    return memory != nullptr && std::holds_alternative<Memory::Ashmem>(memory->handle);
}

// Test device memory allocation on a compilation with only a single partition.
TEST_P(MemoryDomainTest, SinglePartition) {
    createAndRegisterDriver(
            "test_driver",
            {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
            kAllocateReturn);
    auto compilation = createCompilation({"test_driver"});
    ASSERT_NE(compilation.getHandle(), nullptr);

    auto [n, memory] = allocateDeviceMemory(compilation, {0}, {0});
    if (kAllocateReturn == AllocateReturn::OK) {
        // The memory should be backed by the IBuffer returned from the driver.
        ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
        const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
        ASSERT_NE(m, nullptr);
        EXPECT_NE(m->getIBuffer(), nullptr);
    } else {
        if (kCompileWithExplicitDeviceList) {
            // Should not fall back when compiled with an explicit device list.
            ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
        } else {
            // The memory should fall back to ashmem or a blob AHardwareBuffer,
            // depending on the driver version.
            ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
            const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
            ASSERT_NE(m, nullptr);
            EXPECT_EQ(m->getIBuffer(), nullptr);
            const auto& memory = m->getMemory();
            EXPECT_TRUE(validate(memory).ok());
            if (kUseV1_2Driver) {
                EXPECT_TRUE(isAshmem(memory));
            } else {
                EXPECT_TRUE(isAhwbBlob(memory));
            }
        }
    }
}

// Test device memory allocation on a compilation with multiple partitions.
TEST_P(MemoryDomainTest, MultiplePartitions) {
    createAndRegisterDriver("test_driver_add", {V1_3::OperationType::ADD}, kAllocateReturn);
    createAndRegisterDriver("test_driver_sub", {V1_3::OperationType::SUB}, kAllocateReturn);
    createAndRegisterDriver("test_driver_mul", {V1_3::OperationType::MUL}, kAllocateReturn);
    auto compilation = createCompilation({"test_driver_add", "test_driver_sub", "test_driver_mul"});
    ASSERT_NE(compilation.getHandle(), nullptr);

    {
        // input0 is used in only a single partition.
        auto [n, memory] = allocateDeviceMemory(compilation, {0}, {});
        if (kAllocateReturn == AllocateReturn::OK) {
            // The memory should be backed by the IBuffer returned from the driver.
            ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
            const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
            ASSERT_NE(m, nullptr);
            EXPECT_NE(m->getIBuffer(), nullptr);
        } else {
            if (kCompileWithExplicitDeviceList) {
                // Should not fall back when compiled with an explicit device list.
                ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
            } else {
                // The memory should fall back to ashmem or a blob AHardwareBuffer,
                // depending on the driver version.
                ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
                const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
                ASSERT_NE(m, nullptr);
                EXPECT_EQ(m->getIBuffer(), nullptr);
                const auto& memory = m->getMemory();
                EXPECT_TRUE(validate(memory).ok());
                if (kUseV1_2Driver) {
                    EXPECT_TRUE(isAshmem(memory));
                } else {
                    EXPECT_TRUE(isAhwbBlob(memory));
                }
            }
        }
    }

    {
        // input1 is shared by two partitions with different drivers, so the runtime will not
        // attempt to allocate on device.
        auto [n, memory] = allocateDeviceMemory(compilation, {1}, {});
        if (kCompileWithExplicitDeviceList) {
            // Should not fall back when compiled with an explicit device list.
            ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
        } else {
            // The memory should fall back to ashmem or a blob AHardwareBuffer,
            // depending on the driver version.
            ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
            const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
            ASSERT_NE(m, nullptr);
            EXPECT_EQ(m->getIBuffer(), nullptr);
            const auto& memory = m->getMemory();
            EXPECT_TRUE(validate(memory).ok());
            if (kUseV1_2Driver) {
                EXPECT_TRUE(isAshmem(memory));
            } else {
                EXPECT_TRUE(isAhwbBlob(memory));
            }
        }
    }

    {
        // output0 is shared by two partitions with different drivers, so the runtime will not
        // attempt to allocate on device.
        auto [n, memory] = allocateDeviceMemory(compilation, {}, {0});
        if (kCompileWithExplicitDeviceList) {
            // Should not fall back when compiled with an explicit device list.
            ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
        } else {
            // The memory should fall back to ashmem or a blob AHardwareBuffer,
            // depending on the driver version.
            ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
            const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
            ASSERT_NE(m, nullptr);
            EXPECT_EQ(m->getIBuffer(), nullptr);
            const auto& memory = m->getMemory();
            EXPECT_TRUE(validate(memory).ok());
            if (kUseV1_2Driver) {
                EXPECT_TRUE(isAshmem(memory));
            } else {
                EXPECT_TRUE(isAhwbBlob(memory));
            }
        }
    }
}

// Test device memory allocation with a dynamic shape.
TEST_P(MemoryDomainTest, DynamicShape) {
    createAndRegisterDriver(
            "test_driver",
            {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL},
            kAllocateReturn);
    auto compilation = createCompilation({"test_driver"});
    ASSERT_NE(compilation.getHandle(), nullptr);

    auto [n, memory] = allocateDeviceMemory(compilation, {}, {1});
    if (kAllocateReturn == AllocateReturn::OK) {
        // The memory should be backed by the IBuffer returned from the driver.
        ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR);
        const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get());
        ASSERT_NE(m, nullptr);
        EXPECT_NE(m->getIBuffer(), nullptr);
    } else {
        // We do not fall back in the case of a dynamic shape.
        ASSERT_EQ(n, ANEURALNETWORKS_OP_FAILED);
    }
}

static const auto kAllocateReturnChoices =
        testing::Values(AllocateReturn::OK, AllocateReturn::BAD_TOKEN, AllocateReturn::BAD_IBUFFER,
                        AllocateReturn::BAD_STATUS, AllocateReturn::NOT_SUPPORTED);

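// V1_2 drivers predate memory domains, so NOT_SUPPORTED is the only allocate()
// behavior that applies to them.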
INSTANTIATE_TEST_SUITE_P(DeviceVersionV1_2, MemoryDomainTest,
                         testing::Combine(testing::Values(true), testing::Bool(),
                                          testing::Values(AllocateReturn::NOT_SUPPORTED)));

// Hardware buffers are an Android concept and aren't necessarily available on
// other platforms, such as ChromeOS, which also builds NNAPI. When using the
// latest driver, memory is allocated via hardware buffers, which will fail on
// non-Android platforms.
#if defined(__ANDROID__)
INSTANTIATE_TEST_SUITE_P(DeviceVersionLatest, MemoryDomainTest,
                         testing::Combine(testing::Values(false), testing::Bool(),
                                          kAllocateReturnChoices));

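// Tests ANeuralNetworksMemory_copy between ashmem and device memories, using
// the full sample driver so that the copies are actually carried out.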
class MemoryDomainCopyTest : public MemoryDomainTestBase {};

TEST_F(MemoryDomainCopyTest, MemoryCopyTest) {
    DeviceManager::get()->forTest_registerDevice(makeSharedDevice(
            "test_driver", new sample_driver::SampleDriverFull(
                                   "test_driver", {.execTime = 0.1f, .powerUsage = 0.1f})));
    auto compilation = createCompilation({"test_driver"});
    ASSERT_NE(compilation.getHandle(), nullptr);

    // Allocate ashmem.
    const float initValue1 = 3.14f, initValue2 = 2.72f;
    auto ashmem1 = TestAshmem::createFrom(&initValue1, sizeof(float));
    auto ashmem2 = TestAshmem::createFrom(&initValue2, sizeof(float));
    ASSERT_NE(ashmem1, nullptr);
    ASSERT_NE(ashmem2, nullptr);

    // Allocate device memories.
    auto [n1, memory1] = allocateDeviceMemory(compilation, {0}, {});
    auto [n2, memory2] = allocateDeviceMemory(compilation, {0}, {});
    ASSERT_EQ(n1, ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(n2, ANEURALNETWORKS_NO_ERROR);

    // Test memory copying: ashmem1 -> memory1 -> memory2 -> ashmem2
    ASSERT_EQ(ANeuralNetworksMemory_copy(ashmem1->get()->get(), memory1.get()),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksMemory_copy(memory1.get(), memory2.get()), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksMemory_copy(memory2.get(), ashmem2->get()->get()),
              ANEURALNETWORKS_NO_ERROR);

    EXPECT_EQ(ashmem2->dataAs<float>()[0], initValue1);
}
#endif

}  // namespace