/aosp12/packages/modules/NeuralNetworks/runtime/include/
  NeuralNetworksWrapper.h
    189:  : mNnApi(nnapi), mMemory(memory) {}  [in Memory()]
    192:  : mNnApi(nnapi) {
    201:  Memory(const NnApiSupportLibrary* nnapi, AHardwareBuffer* buffer) : mNnApi(nnapi) {
    252:  Model(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) {
    378:  Event(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) {}
    379:  Event(const NnApiSupportLibrary* nnapi, int syncFd) : mNnApi(nnapi) {
    456:  return createForDevices(nnapi, model, {device});
    459:  const NnApiSupportLibrary* nnapi, const Model* model,
    466:  return {result, Compilation(nnapi, compilation)};
    567:  : mNnApi(nnapi), mCompilation(compilation) {}
    [all …]

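The classes matched here (Memory, Model, Event, Compilation) appear to be thin C++ wrappers that hold an NnApiSupportLibrary pointer and forward to the NNAPI C API. As a rough illustration of the flow they drive, here is a minimal sketch written directly against the public C API rather than the wrapper itself; the helper name buildAddModel and the one-operation ADD topology are invented for the example:

    #include <android/NeuralNetworks.h>

    // Builds and compiles a single ADD model for one device. The caller owns both
    // handles; per the NNAPI contract the model must outlive the compilation.
    bool buildAddModel(const ANeuralNetworksDevice* device, ANeuralNetworksModel** outModel,
                       ANeuralNetworksCompilation** outCompilation) {
        if (ANeuralNetworksModel_create(outModel) != ANEURALNETWORKS_NO_ERROR) return false;
        ANeuralNetworksModel* model = *outModel;

        uint32_t dims[] = {4};
        ANeuralNetworksOperandType tensorType = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
                                                 .dimensionCount = 1,
                                                 .dimensions = dims,
                                                 .scale = 0.0f,
                                                 .zeroPoint = 0};
        ANeuralNetworksOperandType scalarType = {.type = ANEURALNETWORKS_INT32,
                                                 .dimensionCount = 0,
                                                 .dimensions = nullptr,
                                                 .scale = 0.0f,
                                                 .zeroPoint = 0};
        ANeuralNetworksModel_addOperand(model, &tensorType);  // 0: input a
        ANeuralNetworksModel_addOperand(model, &tensorType);  // 1: input b
        ANeuralNetworksModel_addOperand(model, &scalarType);  // 2: fused activation
        ANeuralNetworksModel_addOperand(model, &tensorType);  // 3: output

        int32_t fuseNone = ANEURALNETWORKS_FUSED_NONE;
        ANeuralNetworksModel_setOperandValue(model, 2, &fuseNone, sizeof(fuseNone));

        uint32_t opInputs[] = {0, 1, 2}, opOutputs[] = {3};
        ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, opInputs, 1, opOutputs);

        uint32_t modelInputs[] = {0, 1};
        ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs, 1, opOutputs);
        if (ANeuralNetworksModel_finish(model) != ANEURALNETWORKS_NO_ERROR) return false;

        // Same shape as the wrapper's createForDevices(nnapi, model, {device}) above.
        const ANeuralNetworksDevice* devices[] = {device};
        return ANeuralNetworksCompilation_createForDevices(model, devices, 1, outCompilation) ==
                       ANEURALNETWORKS_NO_ERROR &&
               ANeuralNetworksCompilation_finish(*outCompilation) == ANEURALNETWORKS_NO_ERROR;
    }
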
/aosp12/packages/modules/NeuralNetworks/shim_and_sl/
  ShimDeviceManager.cpp
    84:   const std::shared_ptr<const NnApiSupportLibrary>& nnapi) {  [in getNamedDevices()]
    86:   if (nnapi->ANeuralNetworks_getDeviceCount(&numDevices) != ANEURALNETWORKS_NO_ERROR) {  [in getNamedDevices()]
    94:   if (nnapi->ANeuralNetworks_getDevice(i, &device) != ANEURALNETWORKS_NO_ERROR) {  [in getNamedDevices()]
    100:  if (nnapi->ANeuralNetworksDevice_getName(device, &name) != ANEURALNETWORKS_NO_ERROR) {  [in getNamedDevices()]
    139:  const std::shared_ptr<const NnApiSupportLibrary> nnapi =  [in registerDevices()]
    145:  const auto nameToDevice = getNamedDevices(nnapi);  [in registerDevices()]
    156:  auto shimDevice = ndk::SharedRefBase::make<ShimDevice>(nnapi, device, info.serviceName);  [in registerDevices()]

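getNamedDevices() above illustrates the shim's calling convention: every NNAPI entry point is reached through the loaded NnApiSupportLibrary dispatch table ("nnapi->…") rather than through libneuralnetworks.so. A hedged sketch of that enumeration loop; the function name enumerateDevices and the map type are illustrative, not the file's actual declarations, and the support-library headers defining NnApiSupportLibrary are assumed to be in scope:

    #include <cstdint>
    #include <map>
    #include <string>

    // Enumerate accelerators exposed by the support library and key them by name.
    std::map<std::string, ANeuralNetworksDevice*> enumerateDevices(
            const NnApiSupportLibrary* nnapi) {
        std::map<std::string, ANeuralNetworksDevice*> nameToDevice;
        uint32_t numDevices = 0;
        if (nnapi->ANeuralNetworks_getDeviceCount(&numDevices) != ANEURALNETWORKS_NO_ERROR) {
            return nameToDevice;
        }
        for (uint32_t i = 0; i < numDevices; ++i) {
            ANeuralNetworksDevice* device = nullptr;
            const char* name = nullptr;
            if (nnapi->ANeuralNetworks_getDevice(i, &device) == ANEURALNETWORKS_NO_ERROR &&
                nnapi->ANeuralNetworksDevice_getName(device, &name) == ANEURALNETWORKS_NO_ERROR) {
                nameToDevice.emplace(name, device);
            }
        }
        return nameToDevice;
    }
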
  ShimDevice.cpp
    105:  nnapi->SL_ANeuralNetworksDevice_getPerformanceInfo(  [in getCapabilities()]
    109:  nnapi->SL_ANeuralNetworksDevice_getPerformanceInfo(  [in getCapabilities()]
    113:  nnapi->SL_ANeuralNetworksDevice_getPerformanceInfo(  [in getCapabilities()]
    117:  nnapi->SL_ANeuralNetworksDevice_getPerformanceInfo(  [in getCapabilities()]
    129:  nnapi->SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo(  [in getCapabilities()]
    135:  NumberOfCacheFiles getNumberOfCacheFilesNeeded(const NnApiSupportLibrary* nnapi,  [in getNumberOfCacheFilesNeeded()]
    147:  std::vector<Extension> getVendorExtensions(const NnApiSupportLibrary* nnapi,  [in getVendorExtensions()]
    150:  nnapi->SL_ANeuralNetworksDevice_getVendorExtensionCount(device, &vendorExtensionCount);  [in getVendorExtensions()]
    172:  nnapi->SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation(  [in getVendorExtensions()]
    181:  ShimDevice::ShimDevice(std::shared_ptr<const NnApiSupportLibrary> nnapi,  [in ShimDevice()]
    [all …]

  ShimConverter.cpp
    47:   const NnApiSupportLibrary* nnapi,  [in convertSubgraphFromHAL()]
    59:   ::android::nn::sl_wrapper::Model resultModel(nnapi);  [in convertSubgraphFromHAL()]
    154:  auto subgraph = convertSubgraphFromHAL(nnapi, memoryPools, model, allModels,  [in convertSubgraphFromHAL()]
    355:  std::optional<ShimConvertedModel> convertFromHAL(const NnApiSupportLibrary* nnapi,  [in convertFromHAL()]
    375:  std::unique_ptr<::android::nn::sl_wrapper::Memory> memory = convertFromHAL(nnapi, pool);  [in convertFromHAL()]
    392:  if (convertSubgraphFromHAL(nnapi, memoryPools, model, &allModels, i, *copiedOperandValues,  [in convertFromHAL()]
    415:  const NnApiSupportLibrary* nnapi, const neuralnetworks::Memory& pool) {  [in convertFromHAL()]
    424:  nnapi, size, PROT_READ | PROT_WRITE, fd, 0, /*ownsFd=*/false);  [in convertFromHAL()]
    438:  nnapi, size, prot, fd, offset, /*ownsFd=*/false);  [in convertFromHAL()]
    484:  std::make_unique<::android::nn::sl_wrapper::Memory>(nnapi, ahwb,  [in convertFromHAL()]

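Lines 424 and 438 show HAL memory pools being re-wrapped as sl_wrapper::Memory objects built from a file descriptor. Underneath, that constructor presumably reduces to the standard createFromFd call; a small hedged sketch of that underlying step (wrapPoolFd is an invented name):

    #include <android/NeuralNetworks.h>
    #include <sys/mman.h>

    // Wrap an existing pool fd as NNAPI memory. Ownership of the fd stays with the
    // caller, matching the /*ownsFd=*/false argument in the snippet above.
    ANeuralNetworksMemory* wrapPoolFd(size_t size, int prot, int fd, size_t offset) {
        ANeuralNetworksMemory* memory = nullptr;
        if (ANeuralNetworksMemory_createFromFd(size, prot, fd, offset, &memory) !=
            ANEURALNETWORKS_NO_ERROR) {
            return nullptr;
        }
        return memory;
    }
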
/aosp12/packages/modules/NeuralNetworks/runtime/test/
  GeneratedTestUtils.h
    58:   GeneratedModel(const NnApiSupportLibrary* nnapi) : sl_wrapper::Model(nnapi) {}  [in GeneratedModel()]
    87:   void createModel(const NnApiSupportLibrary* nnapi, const test_helper::TestModel& testModel,
    89:   inline void createModel(const NnApiSupportLibrary* nnapi, const test_helper::TestModel& testModel,  [in createModel()]
    91:   createModel(nnapi, testModel, /*testDynamicOutputShape=*/false, model);  [in createModel()]

  GeneratedTestUtils.cpp
    59:   static std::unique_ptr<MemoryAHWB> create(const NnApiSupportLibrary* nnapi, uint32_t size) {  [in create()]
    81:   return std::unique_ptr<MemoryAHWB>(new MemoryAHWB(nnapi, ahwb, buffer));  [in create()]
    96:   MemoryAHWB(const NnApiSupportLibrary* nnapi, AHardwareBuffer* ahwb, void* buffer)  [in create()]
    97:   : Memory(nnapi, ahwb, false, {}), mAhwb(ahwb), mBuffer(buffer) {}  [in create()]
    107:  static std::unique_ptr<MemoryAHWB> createConstantReferenceMemory(const NnApiSupportLibrary* nnapi,
    127:  return size == 0 ? nullptr : MemoryAHWB::create(nnapi, size);
    188:  void createModel(const NnApiSupportLibrary* nnapi, const TestModel& testModel,
    196:  std::unique_ptr<MemoryAHWB> memory = createConstantReferenceMemory(nnapi, testModel);
    205:  refModels.push_back(Model(nnapi));

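MemoryAHWB::create() above allocates an AHardwareBuffer so constant operand data can be shared with the driver. A hedged sketch of that allocation pattern using the NDK APIs it builds on; the AhwbBlob struct and createBlob helper are invented for the example:

    #include <android/NeuralNetworks.h>
    #include <android/hardware_buffer.h>

    struct AhwbBlob {
        AHardwareBuffer* ahwb = nullptr;
        void* cpuPtr = nullptr;
        ANeuralNetworksMemory* memory = nullptr;
    };

    // Allocate a BLOB AHardwareBuffer, map it for CPU writes, and expose it to NNAPI.
    bool createBlob(uint32_t size, AhwbBlob* out) {
        AHardwareBuffer_Desc desc = {.width = size,
                                     .height = 1,
                                     .layers = 1,
                                     .format = AHARDWAREBUFFER_FORMAT_BLOB,
                                     .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
                                              AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN};
        if (AHardwareBuffer_allocate(&desc, &out->ahwb) != 0) return false;
        // Map so the test can copy constant reference data into the buffer.
        if (AHardwareBuffer_lock(out->ahwb, AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
                                 /*fence=*/-1, /*rect=*/nullptr, &out->cpuPtr) != 0) {
            return false;
        }
        return ANeuralNetworksMemory_createFromAHardwareBuffer(out->ahwb, &out->memory) ==
               ANEURALNETWORKS_NO_ERROR;
    }
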
  SupportLibraryTestUtils.h
    39:   static std::unique_ptr<TestAshmem> createFrom(const NnApiSupportLibrary* nnapi,  [in createFrom()]
    41:   return createFrom(nnapi, buffer.get<void>(), buffer.size());  [in createFrom()]
    45:   static std::unique_ptr<TestAshmem> createFrom(const NnApiSupportLibrary* nnapi,  [in createFrom()]
    59:   sl_wrapper::Memory memory(nnapi, length, PROT_READ | PROT_WRITE, fd, 0);  [in createFrom()]

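TestAshmem wraps test data in a shared-memory region whose fd is then handed to sl_wrapper::Memory (line 59). A hedged sketch of how such a region can be created and seeded with the NDK shared-memory API; TestRegion and createTestRegion are invented names, and the caller is assumed to keep the fd and mapping alive for the lifetime of the memory, as the test class does:

    #include <android/NeuralNetworks.h>
    #include <android/sharedmem.h>
    #include <cstring>
    #include <sys/mman.h>

    struct TestRegion {
        int fd = -1;
        void* data = nullptr;
        ANeuralNetworksMemory* memory = nullptr;
    };

    // Create an ashmem region, copy the test bytes into it, and expose it to NNAPI.
    bool createTestRegion(const void* bytes, size_t length, TestRegion* out) {
        out->fd = ASharedMemory_create("nnapi-test-data", length);
        if (out->fd < 0) return false;
        out->data = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, out->fd, 0);
        if (out->data == MAP_FAILED) return false;
        std::memcpy(out->data, bytes, length);  // seed the region with the caller's bytes
        return ANeuralNetworksMemory_createFromFd(length, PROT_READ | PROT_WRITE, out->fd, 0,
                                                  &out->memory) == ANEURALNETWORKS_NO_ERROR;
    }
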
  TestGpuNnapi.cpp
    812:  auto nnapi = std::make_unique<NnapiExecutor>(input, output);  [in create()]
    813:  nnapi->initialize(device);  [in create()]
    814:  return nnapi->mIsValid ? std::move(nnapi) : nullptr;  [in create()]
    959:  auto nnapi = NnapiExecutor<dataType>::create(kDevice, mGpuOutput, mNnapiOutput);  [in runTest()]
    960:  if (nnapi == nullptr) return;  [in runTest()]
    967:  auto [nnapiSuccess, nnapiSyncFd] = nnapi->run(gpuSyncFd);  [in runTest()]

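nnapi->run(gpuSyncFd) at line 967 chains the NNAPI execution behind the GPU's sync fence and returns NNAPI's own fence for the test to wait on. A hedged sketch of that handoff using the public fenced-execution API; runAfterGpu is an invented name and the execution is assumed to be fully set up already:

    #include <android/NeuralNetworks.h>

    // Returns the sync fd signalled when the NNAPI run finishes, or -1 on failure.
    // The caller owns (and must eventually close) the returned fd.
    int runAfterGpu(ANeuralNetworksExecution* execution, int gpuSyncFd) {
        ANeuralNetworksEvent* gpuDone = nullptr;
        if (ANeuralNetworksEvent_createFromSyncFenceFd(gpuSyncFd, &gpuDone) !=
            ANEURALNETWORKS_NO_ERROR) {
            return -1;
        }
        ANeuralNetworksEvent* nnapiDone = nullptr;
        const ANeuralNetworksEvent* deps[] = {gpuDone};
        if (ANeuralNetworksExecution_startComputeWithDependencies(
                    execution, deps, 1, /*duration=*/0, &nnapiDone) != ANEURALNETWORKS_NO_ERROR) {
            ANeuralNetworksEvent_free(gpuDone);
            return -1;
        }
        int nnapiSyncFd = -1;
        ANeuralNetworksEvent_getSyncFenceFd(nnapiDone, &nnapiSyncFd);
        ANeuralNetworksEvent_free(gpuDone);
        ANeuralNetworksEvent_free(nnapiDone);
        return nnapiSyncFd;
    }
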
/aosp12/packages/modules/NeuralNetworks/shim_and_sl/include/
  SupportLibraryWrapper.h
    51:   Memory(const NnApiSupportLibrary* nnapi, ANeuralNetworksMemory* memory)  [in Memory()]
    52:   : mNnApi(nnapi), mMemory(memory), mSize(0) {}  [in Memory()]
    64:   : mNnApi(nnapi), mOwnedAHWB(ownAHWB ? buffer : nullptr), mSize(size) {  [in Memory()]
    71:   : mNnApi(nnapi), mSize(size) {  [in Memory()]
    141:  Model(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) {  [in Model()]
    352:  return createForDevices(nnapi, model, {device});  [in createForDevice()]
    355:  const NnApiSupportLibrary* nnapi, const Model* model,  [in createForDevices()]
    362:  return {result, Compilation(nnapi, compilation)};  [in createForDevices()]
    467:  : mNnApi(nnapi), mCompilation(compilation) {}  [in Compilation()]
    475:  Execution(const NnApiSupportLibrary* nnapi, const Compilation* compilation)  [in Execution()]
    [all …]

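The Execution wrapper declared at line 475 completes the picture: after Compilation::createForDevices, inputs and outputs are bound and the run is dispatched, again through the support-library table whose members mirror the C entry points (as the other matches in this listing show). A hedged sketch of that phase; computeOnce is an invented helper, a single float input and output are assumed, and the support-library headers are assumed to be in scope:

    #include <vector>

    // Bind one input and one output buffer, run synchronously, and clean up.
    bool computeOnce(const NnApiSupportLibrary* nnapi, ANeuralNetworksCompilation* compilation,
                     const std::vector<float>& in, std::vector<float>* out) {
        ANeuralNetworksExecution* execution = nullptr;
        if (nnapi->ANeuralNetworksExecution_create(compilation, &execution) !=
            ANEURALNETWORKS_NO_ERROR) {
            return false;
        }
        // A null operand type means "use the type declared in the model".
        nnapi->ANeuralNetworksExecution_setInput(execution, 0, nullptr, in.data(),
                                                 in.size() * sizeof(float));
        nnapi->ANeuralNetworksExecution_setOutput(execution, 0, nullptr, out->data(),
                                                  out->size() * sizeof(float));
        const bool ok =
                nnapi->ANeuralNetworksExecution_compute(execution) == ANEURALNETWORKS_NO_ERROR;
        nnapi->ANeuralNetworksExecution_free(execution);
        return ok;
    }
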
  ShimConverter.h
    51:   std::optional<ShimConvertedModel> convertFromHAL(const NnApiSupportLibrary* nnapi,
    56:   const NnApiSupportLibrary* nnapi, const neuralnetworks::Memory& pool);

  ShimPreparedModel.h
    33:   ShimPreparedModel(std::shared_ptr<const NnApiSupportLibrary> nnapi,  [in ShimPreparedModel()]
    39:   : mNnapi(nnapi),  [in ShimPreparedModel()]

/aosp12/build/make/target/board/
  gsi_system_ext_user.prop
    4:    # GSI disables non-AOSP nnapi extensions on product partition
    5:    ro.nnapi.extensions.deny_on_product=true

  gsi_system_ext.prop
    7:    # GSI disables non-AOSP nnapi extensions on product partition
    8:    ro.nnapi.extensions.deny_on_product=true

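Both GSI prop files set the same switch; the runtime checks it to decide whether apps installed on the product partition may use vendor NNAPI extensions (compare the hal_neuralnetworks.te sepolicy comment further down in this listing). A hedged sketch of how a native component could read it, assuming libbase is available; the helper name is invented:

    #include <android-base/properties.h>

    // Returns true on GSI builds where vendor NNAPI extensions are denied on /product.
    bool vendorExtensionsDeniedOnProduct() {
        return android::base::GetBoolProperty("ro.nnapi.extensions.deny_on_product", false);
    }
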
/aosp12/packages/modules/NeuralNetworks/driver/sample_shim/config/
  android.hardware.neuralnetworks-shell-service-sample.rc
    2:    interface aidl android.hardware.neuralnetworks.IDevice/nnapi-sample_sl_updatable

/aosp12/packages/modules/NeuralNetworks/runtime/test/specs/
  visualize_spec.sh
    27:   LOG_DIR=$(mktemp -d)/nnapi-spec-html

/aosp12/packages/modules/NeuralNetworks/tools/api/
  Types.t
    37:   #include "nnapi/OperandTypes.h"
    38:   #include "nnapi/OperationTypes.h"
    39:   #include "nnapi/Result.h"

  generate_api.sh
    31:   CANONICALDIR=${ANDROID_BUILD_TOP}/packages/modules/NeuralNetworks/common/include/nnapi

/aosp12/packages/modules/NeuralNetworks/
  TEST_MAPPING
    41:   "path": "external/tensorflow/tensorflow/lite/delegates/nnapi"

/aosp12/packages/modules/NeuralNetworks/runtime/test/fuzzing/
  visualize_random_graph.sh
    27:   LOG_DIR=$(mktemp -d)/nnapi-fuzzing-logs

/aosp12/system/sepolicy/prebuilts/api/29.0/public/
  hal_neuralnetworks.te
    16:   # Allow NN HAL client to check the ro.nnapi.extensions.deny_on_product

/aosp12/packages/modules/NeuralNetworks/tools/test_generator/test_harness/
  Android.bp
    44:   local_include_dirs: ["include/nnapi"],

/aosp12/hardware/interfaces/neuralnetworks/utils/adapter/
  Android.bp
    30:   local_include_dirs: ["include/nnapi/hal"],

/aosp12/hardware/interfaces/neuralnetworks/utils/service/
  Android.bp
    30:   local_include_dirs: ["include/nnapi/hal"],

/aosp12/hardware/interfaces/neuralnetworks/utils/common/
  Android.bp
    30:   local_include_dirs: ["include/nnapi/hal"],

/aosp12/hardware/interfaces/neuralnetworks/1.0/utils/
  Android.bp
    30:   local_include_dirs: ["include/nnapi/hal/1.0/"],