/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_PREPARED_MODEL_H

#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>

#include <memory>
#include <tuple>
#include <utility>
#include <vector>

// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
// lifetimes across processes and for protecting asynchronous calls across AIDL.

namespace aidl::android::hardware::neuralnetworks::utils {

// Class that adapts aidl_hal::IPreparedModel to nn::IPreparedModel.
class PreparedModel final : public nn::IPreparedModel,
                            public std::enable_shared_from_this<PreparedModel> {
    struct PrivateConstructorTag {};

  public:
    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
            std::shared_ptr<aidl_hal::IPreparedModel> preparedModel);

    PreparedModel(PrivateConstructorTag tag,
                  std::shared_ptr<aidl_hal::IPreparedModel> preparedModel);

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
            const nn::OptionalDuration& loopTimeoutDuration,
            const nn::OptionalDuration& timeoutDurationAfterFence) const override;

    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
            const nn::Request& request, nn::MeasureTiming measure,
            const nn::OptionalDuration& loopTimeoutDuration) const override;

    nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;

    std::any getUnderlyingResource() const override;

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
            const Request& request, bool measure, int64_t deadline, int64_t loopTimeoutDuration,
            const hal::utils::RequestRelocation& relocation) const;

    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
    executeFencedInternal(const Request& request,
                          const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measure,
                          int64_t deadline, int64_t loopTimeoutDuration,
                          int64_t timeoutDurationAfterFence,
                          const hal::utils::RequestRelocation& relocation) const;

  private:
    const std::shared_ptr<aidl_hal::IPreparedModel> kPreparedModel;
};
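
// Example usage (illustrative sketch, not part of this header's API): given a
// std::shared_ptr<aidl_hal::IPreparedModel> binder proxy already obtained from a driver,
// the adapter is created via PreparedModel::create and then driven through the canonical
// nn::IPreparedModel interface. The names `aidlPreparedModel` and `nnRequest` below are
// placeholders, not symbols defined by this header.
//
//     nn::GeneralResult<std::shared_ptr<const PreparedModel>> created =
//             PreparedModel::create(std::move(aidlPreparedModel));
//     if (created.has_value()) {
//         const nn::SharedPreparedModel preparedModel = created.value();
//         const auto executionResult = preparedModel->execute(
//                 nnRequest, nn::MeasureTiming::NO, /*deadline=*/{},
//                 /*loopTimeoutDuration=*/{});
//         // On success, executionResult holds the output shapes and timing information.
//     }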

}  // namespace aidl::android::hardware::neuralnetworks::utils

#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_PREPARED_MODEL_H