1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 /**
18  * @addtogroup NeuralNetworks
19  * @{
20  */
21 
22 /**
23  * @file NeuralNetworks.h
24  */
25 
26 #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
27 #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
28 
29 /******************************************************************
30  *
31  * IMPORTANT NOTICE:
32  *
33  *   This file is part of Android's set of stable system headers
34  *   exposed by the Android NDK (Native Development Kit).
35  *
36  *   Third-party source AND binary code relies on the definitions
37  *   here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
38  *
39  *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
40  *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
41  *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
42  *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
43  */
44 
45 #include <android/hardware_buffer.h>
46 #include <stdbool.h>
47 #include <stddef.h>
48 #include <stdint.h>
49 #include <sys/cdefs.h>
50 
51 #include "NeuralNetworksTypes.h"
52 
// This is required for building libneuralnetworks_cl;
// the symbols have the same names as in the NDK, but
// they are not bound by API availability.
56 #ifdef NN_COMPATIBILITY_LIBRARY_BUILD
57 #define __NNAPI_INTRODUCED_IN(x)
58 #else
59 #define __NNAPI_INTRODUCED_IN(x) __INTRODUCED_IN(x)
60 #endif
61 
62 #ifndef __NNAPI_FL5_MIN_ANDROID_API__
63 #define __NNAPI_FL5_MIN_ANDROID_API__ __ANDROID_API_S__
64 #endif
65 
66 __BEGIN_DECLS
67 
68 /**
69  * Create a {@link ANeuralNetworksMemoryDesc} with no properties.
70  *
71  * This only creates the memory descriptor. Its properties should be set with calls to
72  * {@link ANeuralNetworksMemoryDesc_addInputRole},
73  * {@link ANeuralNetworksMemoryDesc_addOutputRole}, and
74  * {@link ANeuralNetworksMemoryDesc_setDimensions}.
75  *
76  * {@link ANeuralNetworksMemoryDesc_finish} must be called once all properties have been set.
77  *
78  * {@link ANeuralNetworksMemoryDesc_free} must be called once the memory descriptor
79  * is no longer needed.
80  *
81  * Available since NNAPI feature level 4.
82  *
83  * @param desc The {@link ANeuralNetworksMemoryDesc} to be created.
84  *             Set to NULL if unsuccessful.
85  *
86  * @return ANEURALNETWORKS_NO_ERROR if successful.
87  */
88 int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc** desc) __NNAPI_INTRODUCED_IN(30);
89 
90 /**
91  * Destroy a memory descriptor.
92  *
93  * The memory descriptor need not have been finished by a call to
94  * {@link ANeuralNetworksMemoryDesc_finish}.
95  *
96  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
97  *
98  * Available since NNAPI feature level 4.
99  *
100  * @param desc The memory descriptor to be destroyed. Passing NULL is acceptable and
101  *             results in no operation.
102  */
103 void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc* desc) __NNAPI_INTRODUCED_IN(30);
104 
105 /**
106  * Specify that a memory object will be playing the role of an input to an execution created from a
107  * particular compilation.
108  *
109  * The compilation and the input index fully specify an input operand. This function
110  * may be invoked multiple times on the same memory descriptor with different input operands,
111  * and the same input operand may be specified on multiple memory descriptors. However,
112  * specifying the same input operand on the same memory descriptor more than once will
113  * return an error.
114  *
115  * The dimensions of the corresponding model operands of all the roles specified by
116  * {@link ANeuralNetworksMemoryDesc_addInputRole} and
117  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two
118  * dimensions are incompatible if both ranks are fully specified but have different values, or if
119  * there is at least one axis that is fully specified in both but has different values.
120  *
121  * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
122  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on a memory descriptor
123  * before invoking {@link ANeuralNetworksMemoryDesc_finish}.
124  *
125  * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
126  * called will return an error.
127  *
128  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
129  *
130  * Available since NNAPI feature level 4.
131  *
132  * @param desc The memory descriptor to be modified.
133  * @param compilation The compilation object. It must already have been finished by calling
134  *                    {@link ANeuralNetworksCompilation_finish}, and must outlive the memory
135  *                    descriptor.
136  * @param index The index of the input argument we are referencing from the compilation. It is
137  *              an index into the inputs list passed to
138  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
139  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
140  * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the
141  *                  memory is to be used in the specified role. This is provided as a hint to
142  *                  optimize the case when different roles prefer different memory locations or data
143  *                  layouts.
144  *
145  * @return ANEURALNETWORKS_NO_ERROR if successful.
146  */
147 int ANeuralNetworksMemoryDesc_addInputRole(ANeuralNetworksMemoryDesc* desc,
148                                            const ANeuralNetworksCompilation* compilation,
149                                            uint32_t index, float frequency)
150         __NNAPI_INTRODUCED_IN(30);
151 
152 /**
153  * Specify that a memory object will be playing the role of an output to an execution created from a
154  * particular compilation.
155  *
156  * The compilation and the output index fully specify an output operand. This function
157  * may be invoked multiple times on the same memory descriptor with different output operands,
158  * and the same output operand may be specified on multiple memory descriptors. However,
159  * specifying the same output operand on the same memory descriptor object more than once will
160  * return an error.
161  *
162  * The dimensions of the corresponding model operands of all the roles specified by
163  * {@link ANeuralNetworksMemoryDesc_addInputRole} and
164  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two
165  * dimensions are incompatible if both ranks are fully specified but have different values, or if
166  * there is at least one axis that is fully specified in both but has different values.
167  *
168  * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
169  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on the memory descriptor
170  * before invoking {@link ANeuralNetworksMemoryDesc_finish}.
171  *
172  * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
173  * called will return an error.
174  *
175  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
176  *
177  * Available since NNAPI feature level 4.
178  *
179  * @param desc The memory descriptor to be modified.
180  * @param compilation The compilation object. It must already have been finished by calling
181  *                    {@link ANeuralNetworksCompilation_finish}, and must outlive the memory
182  *                    descriptor.
183  * @param index The index of the output argument we are referencing from the compilation. It is
184  *              an index into the outputs list passed to
185  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
186  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
187  * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the
188  *                  memory is to be used in the specified role. This is provided as a hint to
189  *                  optimize the case when multiple roles prefer different memory locations or data
190  *                  layouts.
191  *
192  * @return ANEURALNETWORKS_NO_ERROR if successful.
193  */
194 int ANeuralNetworksMemoryDesc_addOutputRole(ANeuralNetworksMemoryDesc* desc,
195                                             const ANeuralNetworksCompilation* compilation,
196                                             uint32_t index, float frequency)
197         __NNAPI_INTRODUCED_IN(30);
198 
199 /**
200  * Set the dimensional information of the memory descriptor.
201  *
202  * The specified dimensions must be compatible with the dimensions of the corresponding model
203  * operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and
204  * {@link ANeuralNetworksMemoryDesc_addOutputRole}. Two dimensions are incompatible if both ranks
205  * are fully specified but have different values, or if there is at least one axis that is fully
206  * specified in both but has different values.
207  *
208  * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
209  * called will return an error.
210  *
211  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
212  *
213  * Available since NNAPI feature level 4.
214  *
215  * @param desc The memory descriptor to be modified.
216  * @param rank The number of dimensions. Must be 0 for scalars.
217  * @param dimensions An array of dimensions. An entry with the value 0 indicates that the
218  *                   corresponding axis has an unknown size.
219  *
220  * @return ANEURALNETWORKS_NO_ERROR if successful.
221  */
222 int ANeuralNetworksMemoryDesc_setDimensions(ANeuralNetworksMemoryDesc* desc, uint32_t rank,
223                                             const uint32_t* dimensions) __NNAPI_INTRODUCED_IN(30);
224 
225 /**
226  * Indicate that we have finished modifying a memory descriptor. Required before calling
227  * {@link ANeuralNetworksMemory_createFromDesc}.
228  *
229  * This function must only be called once for a given memory descriptor.
230  *
231  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
232  *
233  * Available since NNAPI feature level 4.
234  *
235  * @param desc The memory descriptor to be finished.
236  *
237  * @return ANEURALNETWORKS_NO_ERROR if successful.
238  */
239 int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc* desc) __NNAPI_INTRODUCED_IN(30);
240 
241 /**
242  * Creates a memory object from a memory descriptor.
243  *
244  * The memory object is created with an uninitialized buffer. A memory object with an uninitialized
245  * buffer may only be used according to the roles specified by {@link
246  * ANeuralNetworksMemoryDesc_addOutputRole}, or as the destination memory in {@link
247  * ANeuralNetworksMemory_copy}. The buffer of a memory object is initialized after the memory object
248  * is used as an output in a successful execution, or used as the destination memory in a successful
249  * {@link ANeuralNetworksMemory_copy}. A memory object with an initialized buffer may be used
250  * according to all roles specified in {@link ANeuralNetworksMemoryDesc}, or as the source or
251  * destination memory in {@link ANeuralNetworksMemory_copy}. The buffer of a memory object will
252  * return to the uninitialized state if the memory object is used as an output in a failed
253  * execution, or used as the destination memory in a failed {@link ANeuralNetworksMemory_copy}.
254  *
255  * The dimensions of the memory descriptor are deduced from the dimensions of the corresponding
256  * model operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and
257  * {@link ANeuralNetworksMemoryDesc_addOutputRole}, as well as the dimensions set by the call to
258  * {@link ANeuralNetworksMemoryDesc_setDimensions}, if any. The memory descriptor may have
259  * unspecified dimensions or rank. In such a case, the same memory object may be used with different
260  * shapes of outputs in different executions. When the memory is used as an input, the input shape
261  * must be the same as the output shape from the last execution using this memory object as an
262  * output, or the last {@link ANeuralNetworksMemory_copy} using this memory object as the
263  * destination memory. Creating a memory object with unspecified dimensions or rank may fail for
264  * certain sets of roles.
265  *
266  * Using the memory in roles or shapes that are not compatible with the rules specified above will
267  * return an error.
268  *
269  * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
270  * {@link ANeuralNetworksExecution_setOutputFromMemory} with the memory object,
271  * both offset and length must be set to zero and the entire memory region will be
272  * associated with the specified input or output operand.
273  *
274  * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with the memory created from this
275  * function will return an error.
276  *
277  * {@link ANeuralNetworksMemory_free} must be called once the memory is no longer needed.
278  *
279  * Attempting to create memory from an unfinished memory descriptor will return an error.
280  *
281  * The provided {@link ANeuralNetworksMemoryDesc} need not outlive the {@link ANeuralNetworksMemory}
282  * object.
283  *
284  * Available since NNAPI feature level 4.
285  *
286  * @param desc The memory descriptor.
287  * @param memory The memory object to be created.
288  *               Set to NULL if unsuccessful.
289  *
290  * @return ANEURALNETWORKS_NO_ERROR if successful; ANEURALNETWORKS_OP_FAILED if the memory is
291  *         created with unspecified dimensions or rank and it is not supported for this set of
292  *         roles.
293  */
294 int ANeuralNetworksMemory_createFromDesc(const ANeuralNetworksMemoryDesc* desc,
295                                          ANeuralNetworksMemory** memory) __NNAPI_INTRODUCED_IN(30);
296 
297 /**
298  * Copies data from one memory object to another.
299  *
300  * If at most one of the src and dst is created from {@link ANeuralNetworksMemory_createFromDesc},
301  * the src and dst must have the same logical size:
302  * - If the memory is created from {@link ANeuralNetworksMemory_createFromFd}, or if it is created
303  *   from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with format of
304  *   AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size of the memory.
305  * - If the memory is created from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with a
306  *   format other than AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size when there is
307  *   no padding and the data is tightly packed. This function may fail if the AHardwareBuffer
308  *   cannot be accessed.
309  * - If the memory is created from {@link ANeuralNetworksMemory_createFromDesc}, the logical size
310  *   equals the size indicated by the {@link OperandCode} multiplied by the number of elements. This
311  *   function will fail if the number of elements is unknown.
312  *
313  * If both src and dst are created from {@link ANeuralNetworksMemory_createFromDesc}, they must have
314  * compatible dimensions. Two dimensions are incompatible if both ranks are fully specified but
315  * have different values, or if there is at least one axis that is fully specified in both but has
316  * different values. The dst may have unspecified dimensions or rank. In such a case, the dimensions
317  * of dst will get updated according to the dimensions of the src.
318  *
319  * In both cases, if the src is created from {@link ANeuralNetworksMemory_createFromDesc}, it must
320  * have been used as an output in a successful execution, or used as the destination memory in a
321  * successful {@link ANeuralNetworksMemory_copy}.
322  *
323  * The src and dst may have different data layout, in which case the data copying is performed
324  * logically with data layout transformation.
325  *
326  * Available since NNAPI feature level 4.
327  *
328  * @param src The source memory object.
329  * @param dst The destination memory object.
330  *
331  * @return ANEURALNETWORKS_NO_ERROR if successful.
332  */
333 int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory* src, const ANeuralNetworksMemory* dst)
334         __NNAPI_INTRODUCED_IN(30);
335 
336 /**
337  * Get the number of available devices.
338  *
339  * @param numDevices Used to return the number of devices.
340  *
341  * @return ANEURALNETWORKS_NO_ERROR if successful.
342  *
343  * Available since NNAPI feature level 3.
344  */
345 int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) __NNAPI_INTRODUCED_IN(29);
346 
347 /**
348  * Get the representation of the specified device.
349  *
350  * @param devIndex The index of the specified device. Must be less than the
 *                 number of available devices.
352  * @param device The representation of the specified device.
353  *               The same representation will always be returned for the specified
354  *               device.
355  *
356  * @return ANEURALNETWORKS_NO_ERROR if successful.
357  *
358  * Available since NNAPI feature level 3.
359  */
360 int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device)
361         __NNAPI_INTRODUCED_IN(29);
362 
363 /**
364  * Get the name of the specified device.
365  *
366  * @param device The representation of the specified device.
367  * @param name   The returned name of the specified device. The name will be in UTF-8
368  *               and will be null-terminated. It will be recognizable as a known device name
369  *               rather than a cryptic string. For devices with feature level reported by
370  *               {@link ANeuralNetworksDevice_getFeatureLevel} that is
371  *               {@link ANEURALNETWORKS_FEATURE_LEVEL_3} and higher, the format of the name is
372  *               {VENDOR}-{DEVICE}. For devices with feature level
373  *               {@link ANEURALNETWORKS_FEATURE_LEVEL_2} or lower, the format of the name is
374  *               undefined. The name will remain valid for the duration of the application.
375  *
376  * @return ANEURALNETWORKS_NO_ERROR if successful.
377  *
378  * Available since NNAPI feature level 3.
379  */
380 int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name)
381         __NNAPI_INTRODUCED_IN(29);
382 
383 /**
384  * Get the type of a given device.
385  *
386  * The device type can be used to help application developers to distribute Machine Learning
387  * workloads and other workloads such as graphical rendering.
388  * E.g., for an app which renders AR scenes based on real time object detection results,
389  * the developer could choose an ACCELERATOR type device for ML workloads, and reserve GPU
390  * for graphical rendering.
391  *
392  * @param device The representation of the specified device.
393  * @param type The returned {@link DeviceTypeCode} of the specified device.
394  *
395  * @return ANEURALNETWORKS_NO_ERROR if successful.
396  *
397  * Available since NNAPI feature level 3.
398  */
399 int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type)
400         __NNAPI_INTRODUCED_IN(29);
401 
402 /**
403  * Get the version of the driver implementation of the specified device.
404  *
 * It is the responsibility of the driver implementor to ensure that this version string
 * uniquely distinguishes this implementation from all previous implementations.
407  *
408  * This version string must not be confused with the feature level which is solely defined
409  * by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no implicit ordering of the versions.
410  * For example, it is not possible to filter all drivers older than a certain version.
411  *
412  * Application developers may use this version string to avoid or prefer specific driver
413  * implementations. For example, an application may want to do so because:
414  *     - A specific version of the driver does not provide the required performance,
415  *       perhaps because of a performance regression.
 *     - A specific version of the driver has a bug or returns results that don't match
417  *       the minimum precision requirement for the application.
418  *
419  * @param device The representation of the specified device.
420  * @param version The returned version string of the driver for the specified device. The
421  *                string will be in UTF-8 and will be null-terminated. For devices with feature
422  *                level 28 or lower, "UNKNOWN" will be returned. The version string will remain
423  *                valid for the duration of the application.
424  *
425  * @return ANEURALNETWORKS_NO_ERROR if successful.
426  *
427  * Available since NNAPI feature level 3.
428  */
429 int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version)
430         __NNAPI_INTRODUCED_IN(29);
431 
432 /**
433  * Get the NNAPI feature level of the specified NNAPI device.
434  *
435  * Each device has a supported feature level, which is the most advanced NNAPI specification
436  * and features this driver implements. For example, if the driver implements the features
437  * introduced in {@link ANEURALNETWORKS_FEATURE_LEVEL_2}, but does not implement the features
438  * introduced after {@link ANEURALNETWORKS_FEATURE_LEVEL_2}, the value would be
439  * {@link ANEURALNETWORKS_FEATURE_LEVEL_2}. Developers could decide whether or not the specified
440  * device should be used for a model that has certain feature requirements.
441  *
442  * NNAPI device feature level is closely related to NNAPI runtime feature level
443  * ({@link ANeuralNetworks_getRuntimeFeatureLevel}), which indicates an NNAPI runtime feature
444  * level (the most advanced NNAPI specification and features that the runtime implements).
445  * An NNAPI device feature level is always less than or equal to the runtime feature level.
446  *
447  * This function produces a {@link FeatureLevelCode} enum value, NOT an Android API level.
448  *
449  * @param device The representation of the specified device.
450  * @param featureLevel {@link FeatureLevelCode} of the most advanced feature this driver implements.
451  *
452  * @return ANEURALNETWORKS_NO_ERROR if successful.
453  *
454  * Available since NNAPI feature level 3.
455  */
456 int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
457                                           int64_t* featureLevel) __NNAPI_INTRODUCED_IN(29);
458 
459 /**
460  * Wait until the device is in a live state.
461  *
462  * A device may encounter internal errors and temporarily enter a dead state. A
463  * call that uses a device in such a state will return with the error
464  * {@link ANEURALNETWORKS_DEAD_OBJECT}. ANeuralNetworksDevice_wait will block until
465  * the device is in a live state.
466  *
467  * @param device The representation of the specified device.
468  *
469  * @return ANEURALNETWORKS_NO_ERROR if successful.
470  *
471  * Available since NNAPI feature level 4.
472  */
473 int ANeuralNetworksDevice_wait(const ANeuralNetworksDevice* device) __NNAPI_INTRODUCED_IN(30);
474 
475 /**
476  * Get the supported operations for a specified set of devices. If multiple devices
477  * are selected, the supported operation list is a union of supported operations of all
478  * selected devices.
479  *
480  * @param model The model to be queried.
481  * @param devices The set of devices. Must not contain duplicates.
482  * @param numDevices The number of devices in the set.
483  * @param supportedOps The boolean array to be filled. True means supported. The size of the
484  *                     boolean array must be at least as large as the number of operations
485  *                     in the model. The order of elements in the supportedOps array matches
486  *                     the order in which the corresponding operations were added to the model.
487  *
488  * @return ANEURALNETWORKS_NO_ERROR if successful.
489  *
490  * Available since NNAPI feature level 3.
491  */
492 int ANeuralNetworksModel_getSupportedOperationsForDevices(
493         const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
494         uint32_t numDevices, bool* supportedOps) __NNAPI_INTRODUCED_IN(29);
495 
496 /**
497  * Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set
498  * of devices. If more than one device is specified, the compilation will
499  * distribute the workload automatically across the devices. The model must be fully
500  * supported by the specified set of devices. This means that
501  * ANeuralNetworksModel_getSupportedOperationsForDevices() must have returned true for every
502  * operation for that model/devices pair.
503  *
504  * The user must handle all compilation and execution failures from the
505  * specified set of devices. This is in contrast to a use of {@link
506  * ANeuralNetworksCompilation_create}, where the runtime will attempt to recover
507  * from such failures.
508  *
509  * The model passed to this function is termed the "main model" of the
510  * compilation, to distinguish it from other models referred to by an Operand
511  * of type {@link ANEURALNETWORKS_MODEL} within this compilation.
512  *
513  * @param model The {@link ANeuralNetworksModel} to be compiled.
514  * @param devices The set of devices. Must not contain duplicates.
515  * @param numDevices The number of devices in the set.
516  * @param compilation The newly created object or NULL if unsuccessful.
517  *
518  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
519  *         if the model is invalid.
520  *
521  * Available since NNAPI feature level 3.
522  */
523 int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel* model,
524                                                 const ANeuralNetworksDevice* const* devices,
525                                                 uint32_t numDevices,
526                                                 ANeuralNetworksCompilation** compilation)
527         __NNAPI_INTRODUCED_IN(29);
528 
529 /**
530  * Sets the compilation caching signature and the cache directory.
531  *
532  * Provides optional caching information to the runtime for faster repeated
533  * compilation.
534  *
535  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
536  *
537  * @param compilation The compilation to be modified.
538  * @param cacheDir The cache directory for the runtime to store and retrieve caching
539  *                 data. It is recommended to use the code cache directory provided
540  *                 by the Android runtime. If not using the code cache directory, the
541  *                 user should choose a directory local to the application, and is
542  *                 responsible for managing the cache entries.
543  * @param token The token provided by the user to specify a model must be of length
544  *              ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that
545  *              the token is unique to a model within the application. The NNAPI
546  *              runtime cannot detect token collisions; a collision will result in a
547  *              failed execution or in a successful execution that produces incorrect
548  *              output values.
549  *
550  * @return ANEURALNETWORKS_NO_ERROR if successful.
551  *
552  * Available since NNAPI feature level 3.
553  */
554 int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation* compilation,
555                                           const char* cacheDir, const uint8_t* token)
556         __NNAPI_INTRODUCED_IN(29);
557 
558 /**
559  * Schedule synchronous evaluation of the execution.
560  *
561  * <p>Schedules synchronous evaluation of the execution. Returns once the
562  * execution has completed and the outputs are ready to be consumed.
563  * </p>
564  *
565  * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,
566  * and the execution is not able to complete before the timeout duration is
567  * exceeded, then execution may be aborted, in which case
568  * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned. If the device has
569  * a feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel}
570  * that is lower than 30, then the timeout duration hint will be ignored.
571  *
572  * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
573  * the condition model does not output false within the loop timeout duration,
574  * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
575  * will be returned.
576  *
577  * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
578  * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
579  * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
580  * the execution is in the completed state.
581  *
582  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
583  *
584  * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
585  * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
586  * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
587  * asynchronous execution with dependencies.
588  *
589  * Available since NNAPI feature level 3.
590  *
591  * @param execution The execution to be scheduled and executed.
592  *
593  * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
594  *         ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot
595  *         be properly mapped.
596  */
597 int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) __NNAPI_INTRODUCED_IN(29);
598 
599 /**
600  * Get the dimensional information of the specified output operand of the model of the
601  * latest computation evaluated on {@link ANeuralNetworksExecution}.
602  *
603  * This function may only be invoked when the execution is in the completed state.
604  *
605  * See {@link ANeuralNetworksExecution} for information on execution states.
606  *
607  * @param execution The execution to be queried.
608  * @param index The index of the output argument we are querying. It is
609  *              an index into the lists passed to
610  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
611  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
612  * @param rank The rank of the output operand.
613  *
614  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE
615  *         if the target output is provided an insufficient buffer at execution time,
616  *         ANEURALNETWORKS_BAD_DATA if the index is invalid.
617  *
618  * Available since NNAPI feature level 3.
619  */
620 int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution* execution,
621                                                   int32_t index, uint32_t* rank)
622         __NNAPI_INTRODUCED_IN(29);
623 
624 /**
625  * Get the dimensional information of the specified output operand of the model of the
626  * latest computation evaluated on {@link ANeuralNetworksExecution}. The target output operand
627  * cannot be a scalar.
628  *
629  * This function may only be invoked when the execution is in the completed state.
630  *
631  * See {@link ANeuralNetworksExecution} for information on execution states.
632  *
633  * @param execution The execution to be queried.
634  * @param index The index of the output argument we are querying. It is an index into the lists
635  *              passed to {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
636  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
637  * @param dimensions The dimension array to be filled. The size of the array must be exactly as
638  *                   large as the rank of the output operand to be queried in the model.
639  *
640  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE
641  *         if the target output is provided an insufficient buffer at execution time,
642  *         ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar.
643  *
644  * Available since NNAPI feature level 3.
645  */
646 int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution* execution,
647                                                         int32_t index, uint32_t* dimensions)
648         __NNAPI_INTRODUCED_IN(29);
649 
650 /**
651  * Create a {@link ANeuralNetworksBurst} to apply the given compilation.
652  * This only creates the burst object. Computation is only performed once
653  * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid
654  * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}.
655  *
656  * <p>The provided compilation must outlive the burst object.</p>
657  *
658  * Available since NNAPI feature level 3.
659  *
660  * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
661  * @param burst The newly created object or NULL if unsuccessful.
662  *
663  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
664  *         if the compilation is invalid.
665  */
666 int ANeuralNetworksBurst_create(ANeuralNetworksCompilation* compilation,
667                                 ANeuralNetworksBurst** burst) __NNAPI_INTRODUCED_IN(29);
668 
669 /**
670  * Destroys the burst object.
671  *
672  * Available since NNAPI feature level 3.
673  *
674  * @param burst The burst object to be destroyed. Passing NULL is acceptable and
675  *              results in no operation.
676  */
677 void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) __NNAPI_INTRODUCED_IN(29);
678 
679 /**
680  * Schedule synchronous evaluation of the execution on a burst object.
681  *
682  * <p>Schedules synchronous evaluation of the execution. Returns once the
683  * execution has completed and the outputs are ready to be consumed.</p>
684  *
685  * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution,
686  * and the execution is not able to complete before the timeout duration is
687  * exceeded, then execution may be aborted, in which case
688  * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned.
689  *
690  * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
691  * the condition model does not output false within the loop timeout duration,
692  * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
693  * will be returned. If the device has a feature level reported by
694  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
695  * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will be ignored.
696  *
697  * <p>There must be at most one {@link ANeuralNetworksExecution} processing at
698  * any given time for any given burst object. Any
699  * {@link ANeuralNetworksExecution} launched before the previous has finished
700  * will result in ANEURALNETWORKS_BAD_STATE.</p>
701  *
702  * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
703  * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
704  * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
705  * the execution is in the completed state.
706  *
707  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
708  *
709  * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
710  * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
711  * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
712  * asynchronous execution with dependencies.
713  *
714  * Available since NNAPI feature level 3.
715  *
 * @param execution The execution to be scheduled and executed. The execution
 *                  must be created from the same {@link
 *                  ANeuralNetworksCompilation} as the burst object.
 * @param burst The burst object to execute on.
720  *
721  * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
722  */
723 int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* execution,
724                                           ANeuralNetworksBurst* burst) __NNAPI_INTRODUCED_IN(29);
725 
726 /**
727  * Creates a shared memory object from an AHardwareBuffer handle.
728  *
729  * If the shared memory is backed by an AHardwareBuffer of AHARDWAREBUFFER_FORMAT_BLOB
730  * format, it can be used the same way as shared memory created from a file handle. See
731  * {@link ANeuralNetworksMemory} for a description on how to use this shared memory.
732  *
733  * If the shared memory is backed by an AHardwareBuffer of a format other than
734  * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for model inputs and outputs.
735  * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
736  * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both
737  * offset and length must be set to zero and the entire memory region will be
738  * associated with the specified input or output operand. There is no guarantee
739  * that an arbitrary AHardwareBuffer_Format and AHardwareBuffer_UsageFlags combination
740  * can be used by arbitrary devices. The execution will fail if the selected set of
741  * devices cannot consume the buffer.
742  *
743  * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared memory
744  * backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is
745  * disallowed.
746  *
747  * The provided AHardwareBuffer must outlive the ANeuralNetworksMemory object.
748  *
749  * Available since NNAPI feature level 3.
750  *
751  * @param ahwb The AHardwareBuffer handle.
752  * @param memory The memory object to be created.
753  *               Set to NULL if unsuccessful.
754  *
755  * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
756  *
757  * @see AHardwareBuffer
758  */
759 int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb,
760                                                     ANeuralNetworksMemory** memory)
761         __NNAPI_INTRODUCED_IN(29);
762 
763 /**
764 
765  * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be
766  * measured. Evaluation of the execution must not have been scheduled.
767  *
768  * By default, duration is not measured.
769  *
770  * The {@link ANeuralNetworksExecution} must have been created from an
771  * {@link ANeuralNetworksCompilation} which in turn was created from
772  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1.
773  * If the device has a feature level reported by
774  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
775  * {@link ANEURALNETWORKS_FEATURE_LEVEL_3}, then the duration will not be measured.
776  *
777  * This function may only be invoked when the execution is in the preparation state.
778  *
779  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
780  *
781  * Available since NNAPI feature level 3.
782  *
783  * @param execution The execution to be modified.
784  * @param measure 'true' if duration is to be measured, 'false' if not.
785  *
786  * @return ANEURALNETWORKS_NO_ERROR if successful.
787  */
788 int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution* execution, bool measure)
789         __NNAPI_INTRODUCED_IN(29);
790 
791 /**
792  * Get the time spent in the latest computation evaluated on the specified
793  * {@link ANeuralNetworksExecution}, in nanoseconds.
794  *
795  * This function may only be invoked when the execution is in the completed state.
796  *
797  * See {@link ANeuralNetworksExecution} for information on execution states.
798  *
799  * @param execution The execution to be queried.
800  * @param durationCode The measurement to be queried, specified by {@link DurationCode}.
801  * @param duration The returned duration. If no measurement was requested by
802  *                 {@link ANeuralNetworksExecution_setMeasureTiming}, if the
 *                 device has a feature level reported by
804  *                 {@link ANeuralNetworksDevice_getFeatureLevel} that is lower
805  *                 than {@link ANEURALNETWORKS_FEATURE_LEVEL_3}, or for some other
806  *                 reason the duration is not available, UINT64_MAX will be returned.
807  *                 A particular device need not support any given measurement.
808  *
809  * @return ANEURALNETWORKS_NO_ERROR if successful.
810  *
811  * Available since NNAPI feature level 3.
812  */
813 int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution* execution,
814                                          int32_t durationCode, uint64_t* duration)
815         __NNAPI_INTRODUCED_IN(29);
816 
817 /**
818  * Creates a shared memory object from a file descriptor.
819  *
820  * The shared memory is backed by a file descriptor via mmap.
821  * See {@link ANeuralNetworksMemory} for a description on how to use
822  * this shared memory.
823  *
824  * Available since NNAPI feature level 1.
825  *
826  * @param size The requested size in bytes.
827  *             Must not be larger than the file size.
828  * @param protect The desired memory protection for the mapping.
829  *             It is either PROT_NONE or the bitwise OR of one or
830  *             more of the following flags: PROT_READ, PROT_WRITE.
831  * @param fd The requested file descriptor.
832  *           The file descriptor has to be mmap-able. The file
833  *           descriptor will be duplicated.
834  * @param offset The offset to the beginning of the file of the area to map.
835  *               The offset has to be aligned to a page size.
836  * @param memory The memory object to be created.
837  *               Set to NULL if unsuccessful.
838  *
839  * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
840  */
841 int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
842                                        ANeuralNetworksMemory** memory) __NNAPI_INTRODUCED_IN(27);
843 
844 /**
845  * Delete a memory object.
846  *
847  * Destroys the object used by the run time to keep track of the memory.
848  * This will free the underlying actual memory if no other code has open
849  * handles to this memory.
850  *
851  * Available since NNAPI feature level 1.
852  *
853  * @param memory The memory object to be freed. Passing NULL is acceptable and
854  *               results in no operation.
855  */
856 void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __NNAPI_INTRODUCED_IN(27);
857 
858 /**
859  * Create an empty {@link ANeuralNetworksModel}.
860  *
861  * <p>This only creates the object. Computation is performed once
862  * {@link ANeuralNetworksExecution_burstCompute},
863  * {@link ANeuralNetworksExecution_compute},
864  * {@link ANeuralNetworksExecution_startCompute} or
865  * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.
866  *
867  * The model should be constructed with calls to
868  * {@link ANeuralNetworksModel_addOperation} and
869  * {@link ANeuralNetworksModel_addOperand}
870  *
871  * <p>{@link ANeuralNetworksModel_finish} should be called once the model
872  * has been fully constructed.</p>
873  *
874  * <p>{@link ANeuralNetworksModel_free} should be called once the model
875  * is no longer needed.</p>
876  *
877  * Available since NNAPI feature level 1.
878  *
879  * @param model The {@link ANeuralNetworksModel} to be created.
880  *              Set to NULL if unsuccessful.
881  *
882  * @return ANEURALNETWORKS_NO_ERROR if successful.
883  */
884 int ANeuralNetworksModel_create(ANeuralNetworksModel** model) __NNAPI_INTRODUCED_IN(27);
885 
886 /**
887  * Destroy a model.
888  *
889  * The model need not have been finished by a call to
890  * {@link ANeuralNetworksModel_finish}.
891  *
892  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
893  *
894  * Available since NNAPI feature level 1.
895  *
896  * @param model The model to be destroyed. Passing NULL is acceptable and
897  *              results in no operation.
898  */
899 void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __NNAPI_INTRODUCED_IN(27);
900 
901 /**
902  * Indicate that we have finished modifying a model. Required before
903  * calling {@link ANeuralNetworksCompilation_create} and
904  * {@link ANeuralNetworksCompilation_createForDevices}.
905  *
906  * An application must ensure that no other thread uses the model at the same
907  * time.
908  *
909  * This function must only be called once for a given model.
910  *
911  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
912  *
913  * Available since NNAPI feature level 1.
914  *
915  * @param model The model to be finished.
916  *
917  * @return ANEURALNETWORKS_NO_ERROR if successful.
918  */
919 int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) __NNAPI_INTRODUCED_IN(27);
920 
921 /**
922  * Add an operand to a model.
923  *
924  * The order in which the operands are added is important. The first one added
925  * to a model will have the index value 0, the second 1, etc. These indexes are
926  * used as operand identifiers in
927  * {@link ANeuralNetworksModel_addOperation},
928  * {@link ANeuralNetworksModel_identifyInputsAndOutputs},
929  * {@link ANeuralNetworksModel_setOperandValue},
930  * {@link ANeuralNetworksModel_setOperandValueFromMemory},
931  * {@link ANeuralNetworksExecution_setInput},
932  * {@link ANeuralNetworksExecution_setInputFromMemory},
933  * {@link ANeuralNetworksExecution_setOutput}, and
934  * {@link ANeuralNetworksExecution_setOutputFromMemory}.
935  *
936  * <p>Every operand must be referenced in exactly one of the following
937  * ways:<ul>
938  *    <li>It is identified as a model input with
939  *        {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li>
940  *    <li>It is identified as a constant with
941  *        {@link ANeuralNetworksModel_setOperandValue} or
942  *        {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
943  *    <li>It is identified as an output of exactly one operation with
944  *        {@link ANeuralNetworksModel_addOperation}.</li>
945  *    </ul></p>
946  * <p>An operand that is identified as a model input or as a constant
947  * must not also be identified as a model output with
948  * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p>
949  *
950  * To build a model that can accommodate inputs of various sizes, as
951  * you may want to do for a CNN, leave unspecified the dimensions that
952  * will vary at run time.  If you do so, fully specify dimensions
953  * when calling {@link ANeuralNetworksExecution_setInput} or
954  * {@link ANeuralNetworksExecution_setInputFromMemory}.
955  *
956  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
957  * called will return an error.
958  *
959  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
960  *
961  * Available since NNAPI feature level 1.
962  *
963  * @param model The model to be modified.
964  * @param type The {@link ANeuralNetworksOperandType} that describes the shape
965  *             of the operand.  Neither the {@link ANeuralNetworksOperandType}
966  *             nor the dimensions it points to need to outlive the call to
967  *             {@link ANeuralNetworksModel_addOperand}.
968  *
969  * @return ANEURALNETWORKS_NO_ERROR if successful.
970  */
971 int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model,
972                                     const ANeuralNetworksOperandType* type)
973         __NNAPI_INTRODUCED_IN(27);
974 
975 /**
976  * Sets an operand to a constant value.
977  *
978  * Values of length smaller or equal to
979  * ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES
980  * are immediately copied into the model.
981  *
982  * For values of length greater than
983  * ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES, a pointer to
984  * the buffer is stored within the model. The application must not change the
985  * content of this region until all executions using this model have
986  * completed. As the data may be copied during processing, modifying the data
987  * after this call yields undefined results. The provided buffer must outlive
988  * this model.
989  *
990  * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory}
991  * is likely to be more efficient.
992  *
993  * To indicate that an optional operand should be considered missing,
994  * pass nullptr for buffer and 0 for length.
995  *
996  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
997  * called will return an error.
998  *
999  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1000  *
1001  * Available since NNAPI feature level 1.
1002  *
1003  * @param model The model to be modified.
1004  * @param index The index of the model operand we're setting.
1005  * @param buffer A pointer to the data to use.
1006  * @param length The size in bytes of the data value.
1007  *
1008  * @return ANEURALNETWORKS_NO_ERROR if successful.
1009  */
1010 int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index,
1011                                          const void* buffer, size_t length)
1012         __NNAPI_INTRODUCED_IN(27);
1013 
1014 /**
1015  * Sets an operand's per channel quantization parameters.
1016  *
1017  * Sets parameters required by a tensor of type
1018  * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}.
1019  * This function must be called for every tensor of type
1020  * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before
1021  * calling {@link ANeuralNetworksModel_finish}.
1022  *
1023  * Available since NNAPI feature level 3.
1024  *
1025  * @param model The model to be modified.
1026  * @param index The index of the model operand we're setting.
1027  * @param channelQuant The per channel quantization parameters for the operand.
1028  *                    No memory in this struct needs to outlive the call to
1029  *                    this function.
1030  *
1031  * @return ANEURALNETWORKS_NO_ERROR if successful.
1032  */
1033 int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
1034         ANeuralNetworksModel* model, int32_t index,
1035         const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) __NNAPI_INTRODUCED_IN(29);
1036 
1037 /**
1038  * Sets an operand to a value stored in a memory object.
1039  *
1040  * The content of the memory is not copied. A reference to that memory is stored
1041  * inside the model. The application must not change the content of the memory
1042  * region until all executions using this model have completed.  As the data may
1043  * be copied during processing, modifying the data after this call yields
1044  * undefined results.
1045  *
1046  * <p>The provided memory must outlive this model.</p>
1047  *
1048  * To indicate that an optional operand should be considered missing,
1049  * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer.
1050  *
1051  * It is disallowed to set an operand value with shared memory backed by an AHardwareBuffer
1052  * of a format other than AHARDWAREBUFFER_FORMAT_BLOB.
1053  *
1054  * It is disallowed to set an operand value with memory created from
1055  * {@link ANeuralNetworksMemory_createFromDesc}.
1056  *
1057  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1058  * called will return an error.
1059  *
1060  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1061  * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
1062  * AHardwareBuffer usage.
1063  *
1064  * Available since NNAPI feature level 1.
1065  *
1066  * @param model The model to be modified.
1067  * @param index The index of the model operand we're setting.
1068  * @param memory The memory containing the data.
1069  * @param offset This specifies the location of the data within the memory.
1070  *               The offset is in bytes from the start of memory.
1071  * @param length The size in bytes of the data value.
1072  *
1073  * @return ANEURALNETWORKS_NO_ERROR if successful.
1074  */
1075 int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index,
1076                                                    const ANeuralNetworksMemory* memory,
1077                                                    size_t offset, size_t length)
1078         __NNAPI_INTRODUCED_IN(27);
1079 
1080 /**
1081  * Sets an operand to a value that is a reference to another NNAPI model.
1082  *
1083  * The referenced model must already have been finished by a call to
1084  * {@link ANeuralNetworksModel_finish}.
1085  *
1086  * The {@link ANeuralNetworksModel_relaxComputationFloat32toFloat16} setting of
1087  * referenced models is overridden by that setting of the main model of a
1088  * compilation.
1089  *
1090  * The referenced model must outlive the model referring to it.
1091  *
1092  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
1093  * been called will return an error.
1094  *
1095  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1096  *
1097  * Available since NNAPI feature level 4.
1098  *
1099  * @param model The model to be modified.
1100  * @param index The index of the model operand we're setting.
1101  * @param value The model to be referenced.
1102  *
1103  * @return ANEURALNETWORKS_NO_ERROR if successful.
1104  */
1105 int ANeuralNetworksModel_setOperandValueFromModel(ANeuralNetworksModel* model, int32_t index,
1106                                                   const ANeuralNetworksModel* value)
1107         __NNAPI_INTRODUCED_IN(30);
1108 
1109 /**
1110  * Add an operation to a model.
1111  *
1112  * @param model The model to be modified.
1113  * @param type The {@link ANeuralNetworksOperationType} of the operation.
1114  * @param inputCount The number of entries in the inputs array.
 * @param inputs An array of indexes identifying each input operand.
 * @param outputCount The number of entries in the outputs array.
 * @param outputs An array of indexes identifying each output operand.
1118  *
1119  * The operands specified by inputs and outputs must have been
1120  * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
1121  *
1122  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1123  * called will return an error.
1124  *
1125  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1126  *
1127  * Available since NNAPI feature level 1.
1128  *
1129  * @return ANEURALNETWORKS_NO_ERROR if successful.
1130  */
1131 int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
1132                                       ANeuralNetworksOperationType type, uint32_t inputCount,
1133                                       const uint32_t* inputs, uint32_t outputCount,
1134                                       const uint32_t* outputs) __NNAPI_INTRODUCED_IN(27);
1135 
1136 /**
1137  * Specifies which operands will be the model's inputs and
1138  * outputs. Every model must have at least one input and one output.
1139  *
1140  * An operand cannot be used for both input and output. Doing so will
1141  * return an error.
1142  *
1143  * @param model The model to be modified.
1144  * @param inputCount The number of entries in the inputs array.
1145  * @param inputs An array of indexes identifying the input operands.
1146  * @param outputCount The number of entries in the outputs array.
1147  * @param outputs An array of indexes identifying the output operands.
1148  *
1149  * The operands specified by inputs and outputs must have been
1150  * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
1151  *
1152  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1153  * called will return an error.
1154  *
1155  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1156  *
 * Available since NNAPI feature level 1.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 */
1160 int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount,
1161                                                   const uint32_t* inputs, uint32_t outputCount,
1162                                                   const uint32_t* outputs)
1163         __NNAPI_INTRODUCED_IN(27);
1164 
1165 /**
1166  * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
1167  * calculated with range and/or precision as low as that of the IEEE 754 16-bit
1168  * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1169  * must be calculated using at least the range and precision of the IEEE 754
1170  * 32-bit floating-point format.
1171  *
1172  * The relaxComputationFloat32toFloat16 setting of the main model of
1173  * a compilation overrides the values of the referenced models.
1174  *
1175  * @param model The model to be modified.
1176  * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
1177  *              calculated with range and/or precision as low as that of the
1178  *              IEEE 754 16-bit floating point format. 'false' indicates
1179  *              {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
1180  *              at least the range and precision of the IEEE 754 32-bit floating
1181  *              point format.
1182  *
1183  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1184  * called will return an error.
1185  *
 * Available since NNAPI feature level 2.
 *
 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 */
1190 int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow)
1191         __NNAPI_INTRODUCED_IN(28);
1192 
1193 /**
1194  * Create a {@link ANeuralNetworksCompilation} to compile the given model.
1195  *
1196  * The model passed to this function is termed the "main model" of the
1197  * compilation, to distinguish it from other models referred to by an Operand
1198  * of type {@link ANEURALNETWORKS_MODEL} within this compilation.
1199  *
1200  * <p>This function only creates the object. Compilation is only performed once
1201  * {@link ANeuralNetworksCompilation_finish} is invoked.</p>
1202  *
1203  * <p>{@link ANeuralNetworksCompilation_finish} should be called once
1204  * all desired properties have been set on the compilation.</p>
1205  *
 * <p>{@link ANeuralNetworksCompilation_free} should be called once the compilation
 * is no longer needed.</p>
1208  *
1209  * <p>The provided model must outlive the compilation.</p>
1210  *
1211  * The model must already have been finished by a call to
1212  * {@link ANeuralNetworksModel_finish}.
1213  *
1214  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1215  *
1216  * Available since NNAPI feature level 1.
1217  *
1218  * @param model The {@link ANeuralNetworksModel} to be compiled.
1219  * @param compilation The newly created object or NULL if unsuccessful.
1220  *
1221  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
1222  *         if the model is invalid.
1223  */
1224 int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model,
1225                                       ANeuralNetworksCompilation** compilation)
1226         __NNAPI_INTRODUCED_IN(27);
1227 
1228 /**
1229  * Destroy a compilation.
1230  *
1231  * The compilation need not have been finished by a call to
1232  * {@link ANeuralNetworksCompilation_finish}.
1233  *
1234  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1235  *
1236  * Available since NNAPI feature level 1.
1237  *
1238  * @param compilation The compilation to be destroyed. Passing NULL is acceptable and
1239  *                    results in no operation.
1240  */
1241 void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation)
1242         __NNAPI_INTRODUCED_IN(27);
1243 
1244 /**
1245  * Sets the execution preference.
1246  *
 * <p>Provides guidance to the runtime when trade-offs are possible. By default the runtime
 * uses {@link ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER}.</p>
1249  *
1250  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1251  *
1252  * Available since NNAPI feature level 1.
1253  *
1254  * @param compilation The compilation to be modified.
1255  * @param preference Either {@link ANEURALNETWORKS_PREFER_LOW_POWER},
1256  *                  {@link ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER}, or
1257  *                  {@link ANEURALNETWORKS_PREFER_SUSTAINED_SPEED}.
1258  *
1259  * @return ANEURALNETWORKS_NO_ERROR if successful.
1260  */
1261 int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compilation,
1262                                              int32_t preference) __NNAPI_INTRODUCED_IN(27);
1263 
1264 /**
1265  * Indicate that we have finished modifying a compilation. Required before
1266  * calling {@link ANeuralNetworksBurst_create} or
1267  * {@link ANeuralNetworksExecution_create}.
1268  *
1269  * An application must ensure that no other thread uses the compilation at the
1270  * same time.
1271  *
1272  * This function must only be called once for a given compilation.
1273  *
1274  * If {@link ANeuralNetworksCompilation_setTimeout} was called on this
1275  * compilation, and the compilation is not able to be finished before the
1276  * timeout duration is exceeded, then compilation may be aborted, in which case
1277  * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned.
1278  *
1279  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1280  *
1281  * Available since NNAPI feature level 1.
1282  *
1283  * @param compilation The compilation to be finished.
1284  *
1285  * @return ANEURALNETWORKS_NO_ERROR if successful.
1286  */
1287 int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation)
1288         __NNAPI_INTRODUCED_IN(27);
1289 
/**
 * Set the execution priority.
 *
 * Execution priorities are relative to other executions created by the same
 * application (specifically same uid) for the same device. Specifically,
 * priorities of executions from one application will not affect executions from
 * another application. Similarly, priorities of executions on one device will
 * not affect executions on another device.
 *
 * Higher priority executions may use more compute resources than lower priority
 * executions, and may preempt or starve lower priority executions.
 *
 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
 *
 * Available since NNAPI feature level 4.
 *
 * NOTE(review): as this modifies the compilation, it presumably must be called
 * before {@link ANeuralNetworksCompilation_finish} — confirm against the
 * implementation.
 *
 * @param compilation The compilation to be modified.
 * @param priority The relative priority of the execution compared to other
 *     executions created by the application. Must be one of
 *     ANEURALNETWORKS_PRIORITY_*.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 */
int ANeuralNetworksCompilation_setPriority(ANeuralNetworksCompilation* compilation, int priority)
        __NNAPI_INTRODUCED_IN(30);
1315 
/**
 * Set the maximum expected duration for compiling the model.
 *
 * If the device is not able to complete the compilation within the specified
 * duration, the compilation may be aborted. The timeout duration begins at the
 * call to {@link ANeuralNetworksCompilation_finish}.
 *
 * This timeout duration acts as a hint to drivers, and can be used to both free
 * up compute resources within the driver and return control back to the
 * application quicker than is possible without the hint. It enables drivers
 * that are able to estimate how long a compilation will take to abort the
 * compilation before it has even started if the driver believes the compilation
 * cannot be completed within the timeout duration. Similarly, it enables
 * drivers to abort an ongoing compilation if it is taking too long. However,
 * this call does not guarantee that the compilation will complete or abort
 * within the timeout duration.
 *
 * By default (i.e., unless ANeuralNetworksCompilation_setTimeout is called),
 * the timeout duration for compiling the model is considered infinite.
 *
 * The {@link ANeuralNetworksCompilation} must have been created with
 * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
 * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the
 * device has a feature level reported by
 * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
 * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will
 * be ignored.
 *
 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
 *
 * @param compilation The compilation to be modified.
 * @param duration The maximum amount of time in nanoseconds that is expected to
 *     be spent finishing a compilation. If this duration is exceeded, the
 *     compilation may be aborted. If set to 0, the timeout duration is
 *     considered infinite.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 *         ANEURALNETWORKS_BAD_DATA if the compilation was not created with
 *         {@link ANeuralNetworksCompilation_createForDevices} targeting exactly
 *         one device (see above).
 *
 * Available since NNAPI feature level 4.
 */
int ANeuralNetworksCompilation_setTimeout(ANeuralNetworksCompilation* compilation,
                                          uint64_t duration) __NNAPI_INTRODUCED_IN(30);
1358 
/**
 * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
 * This only creates the object. Computation is only performed once
 * {@link ANeuralNetworksExecution_burstCompute},
 * {@link ANeuralNetworksExecution_compute},
 * {@link ANeuralNetworksExecution_startCompute} or
 * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.
 *
 * <p>The provided compilation must outlive the execution.</p>
 *
 * The returned execution object should be destroyed with
 * {@link ANeuralNetworksExecution_free} once it is no longer needed.
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 *
 * Available since NNAPI feature level 1.
 *
 * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
 * @param execution The newly created object or NULL if unsuccessful.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
 *         if the compilation is invalid.
 */
int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation,
                                    ANeuralNetworksExecution** execution) __NNAPI_INTRODUCED_IN(27);
1381 
/**
 * Destroy an execution.
 *
 * This call destroys only the execution object itself; the compilation it was
 * created from remains valid and is still owned by the caller.
 *
 * <p>The execution need not have been scheduled by a call to
 * {@link ANeuralNetworksExecution_burstCompute},
 * {@link ANeuralNetworksExecution_compute},
 * {@link ANeuralNetworksExecution_startCompute} or
 * {@link ANeuralNetworksExecution_startComputeWithDependencies}; but if it has been scheduled,
 * then the application must not call {@link ANeuralNetworksExecution_free}
 * until the execution has completed (i.e.,
 * {@link ANeuralNetworksExecution_burstCompute},
 * {@link ANeuralNetworksExecution_compute}, or
 * {@link ANeuralNetworksEvent_wait} has returned).
 *
 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
 *
 * Available since NNAPI feature level 1.
 *
 * @param execution The execution to be destroyed. Passing NULL is acceptable and
 *                  results in no operation.
 */
void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __NNAPI_INTRODUCED_IN(27);
1404 
/**
 * Associate a user buffer with an input of the model of the
 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
 * been scheduled. Once evaluation of the execution has been scheduled, the
 * application must not change the content of the buffer until the execution has
 * completed. Evaluation of the execution will not change the content of the
 * buffer.
 *
 * <p>The provided buffer must outlive the execution.</p>
 *
 * If the input is optional, you can indicate that it is omitted by
 * passing nullptr for buffer and 0 for length.
 *
 * Otherwise, if the user has not set the execution to accept padded input buffers by
 * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
 * must be equal to the raw size of the input (i.e. the size of an element multiplied by the
 * number of elements). Passing a length argument with value not equal to the raw size of the input
 * will result in ANEURALNETWORKS_BAD_DATA.
 *
 * Otherwise, if the user has set the execution to accept padded input buffers by calling
 * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
 * than the raw size of the input, and the extra bytes at the end of the buffer may be used
 * by the driver to access data in chunks, for efficiency. Passing a length argument with value
 * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA.
 *
 * This function may only be invoked when the execution is in the preparation state.
 *
 * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
 * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
 * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
 * preferred buffer alignment and padding, to improve performance.
 *
 * Available since NNAPI feature level 1.
 *
 * @param execution The execution to be modified.
 * @param index The index of the input argument we are setting. It is
 *              an index into the lists passed to
 *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
 *              the index associated with
 *              {@link ANeuralNetworksModel_addOperand}.
 * @param type The {@link ANeuralNetworksOperandType} of the
 *             operand. Unless the input is omitted, this should be
 *             used to specify the dimensions that were left
 *             unspecified when the operand was added to the
 *             model. All other properties of the type must be the
 *             same as specified in the model. If the type is the same
 *             as specified when the model was built, NULL can be
 *             passed. Neither the {@link ANeuralNetworksOperandType}
 *             nor the dimensions it points to need to outlive the call
 *             to {@link ANeuralNetworksExecution_setInput}.
 * @param buffer The buffer containing the data.
 * @param length The size of the data value in bytes plus any end padding.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the input.
 */
int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index,
                                      const ANeuralNetworksOperandType* type, const void* buffer,
                                      size_t length) __NNAPI_INTRODUCED_IN(27);
1464 
/**
 * Associate a region of a memory object with an input of the model of the
 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
 * been scheduled. Once evaluation of the execution has been scheduled, the
 * application must not change the content of the region until the execution has
 * completed. Evaluation of the execution will not change the content of the
 * region.
 *
 * <p>The provided memory must outlive the execution.</p>
 *
 * If the input is optional, you can indicate that it is omitted by
 * using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for
 * buffer and 0 for length.
 *
 * If the memory is an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB created
 * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer}, or an opaque memory object created
 * from {@link ANeuralNetworksMemory_createFromDesc}, both offset and length must be 0, indicating
 * the whole memory is used.
 *
 * Otherwise, if the user has not set the execution to accept padded input memory objects by
 * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
 * must be equal to the raw size of the input (i.e. the size of an element multiplied by the
 * number of elements). Passing a length argument with value not equal to the raw size of the input
 * will result in ANEURALNETWORKS_BAD_DATA.
 *
 * Otherwise, if the user has set the execution to accept padded input memory objects by calling
 * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
 * than the raw size of the input, and the extra bytes at the end of the memory region may be used
 * by the driver to access data in chunks, for efficiency. Passing a length argument with value
 * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA.
 *
 * This function may only be invoked when the execution is in the preparation state.
 *
 * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
 * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
 * AHardwareBuffer usage.
 * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
 * created from memory descriptors.
 * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
 * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
 * preferred memory alignment and padding, to improve performance.
 *
 * Available since NNAPI feature level 1.
 *
 * @param execution The execution to be modified.
 * @param index The index of the input argument we are setting. It is
 *              an index into the lists passed to
 *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
 *              the index associated with {@link ANeuralNetworksModel_addOperand}.
 * @param type The {@link ANeuralNetworksOperandType} of the
 *             operand. This should be used to specify the dimensions
 *             that were left unspecified when the operand was added
 *             to the model. All other properties of the type must be
 *             the same as specified in the model. If the type is the
 *             same as specified when the model was built, NULL can be
 *             passed. Neither the {@link ANeuralNetworksOperandType}
 *             nor the dimensions it points to need to outlive the call
 *             to {@link ANeuralNetworksExecution_setInputFromMemory}.
 * @param memory The memory containing the data.
 * @param offset This specifies the location of the data within the memory.
 *               The offset is in bytes from the start of memory.
 * @param length The size of the data value in bytes plus any end padding.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the input.
 */
int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
                                                const ANeuralNetworksOperandType* type,
                                                const ANeuralNetworksMemory* memory, size_t offset,
                                                size_t length) __NNAPI_INTRODUCED_IN(27);
1535 
/**
 * Associate a user buffer with an output of the model of the
 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
 * been scheduled. Once evaluation of the execution has been scheduled, the
 * application must not change the content of the buffer until the execution has
 * completed.
 *
 * <p>The provided buffer must outlive the execution.</p>
 *
 * If the output is optional, you can indicate that it is omitted by
 * passing nullptr for buffer and 0 for length.
 *
 * Otherwise, if the user has not set the execution to accept padded output buffers by
 * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
 * must be equal to the raw size of the output (i.e. the size of an element multiplied by the
 * number of elements). Passing a length argument with value not equal to the raw size of the output
 * will result in ANEURALNETWORKS_BAD_DATA.
 *
 * Otherwise, if the user has set the execution to accept padded output buffers by calling
 * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
 * than the raw size of the output, and the extra bytes at the end of the buffer may be used
 * by the driver to access data in chunks, for efficiency. Passing a length argument with value
 * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA.
 *
 * This function may only be invoked when the execution is in the preparation state.
 *
 * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
 * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
 * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
 * preferred buffer alignment and padding, to improve performance.
 *
 * Available since NNAPI feature level 1.
 *
 * @param execution The execution to be modified.
 * @param index The index of the output argument we are setting. It is
 *              an index into the lists passed to
 *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
 *              the index associated with {@link ANeuralNetworksModel_addOperand}.
 * @param type The {@link ANeuralNetworksOperandType} of the
 *             operand. Unless the output is omitted, this should be
 *             used to specify the dimensions that were left
 *             unspecified when the operand was added to the
 *             model. All other properties of the type must be the
 *             same as specified in the model. If the type is the same
 *             as specified when the model was built, NULL can be
 *             passed. Neither the {@link ANeuralNetworksOperandType}
 *             nor the dimensions it points to need to outlive the call
 *             to {@link ANeuralNetworksExecution_setOutput}.
 *             Since NNAPI feature level 3, the output operand can have unspecified
 *             dimensions or rank to be deduced dynamically during the execution.
 *             However, the user must provide a large enough buffer. The user
 *             can retrieve the output dimensional information after the execution
 *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
 *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
 * @param buffer The buffer where the data is to be written.
 * @param length The size of the data value in bytes plus any end padding.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the output.
 */
int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index,
                                       const ANeuralNetworksOperandType* type, void* buffer,
                                       size_t length) __NNAPI_INTRODUCED_IN(27);
1599 
/**
 * Associate a region of a memory object with an output of the model of the
 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
 * been scheduled. Once evaluation of the execution has been scheduled, the
 * application must not change the content of the region until the execution has
 * completed.
 *
 * <p>The provided memory must outlive the execution.</p>
 *
 * If the output is optional, you can indicate that it is omitted by
 * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for
 * buffer and 0 for length.
 *
 * If the memory is an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB created
 * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer}, or an opaque memory object created
 * from {@link ANeuralNetworksMemory_createFromDesc}, both offset and length must be 0, indicating
 * the whole memory is used.
 *
 * Otherwise, if the user has not set the execution to accept padded output memory objects by
 * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
 * must be equal to the raw size of the output (i.e. the size of an element multiplied by the
 * number of elements). Passing a length argument with value not equal to the raw size of the output
 * will result in ANEURALNETWORKS_BAD_DATA.
 *
 * Otherwise, if the user has set the execution to accept padded output memory objects by calling
 * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
 * than the raw size of the output, and the extra bytes at the end of the memory region may be used
 * by the driver to access data in chunks, for efficiency. Passing a length argument with value
 * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA.
 *
 * This function may only be invoked when the execution is in the preparation state.
 *
 * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
 * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
 * AHardwareBuffer usage.
 * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
 * created from memory descriptors.
 * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
 * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
 * preferred memory alignment and padding, to improve performance.
 *
 * Available since NNAPI feature level 1.
 *
 * @param execution The execution to be modified.
 * @param index The index of the output argument we are setting. It is
 *              an index into the lists passed to
 *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
 *              the index associated with {@link ANeuralNetworksModel_addOperand}.
 * @param type The {@link ANeuralNetworksOperandType} of the operand. This should be
 *             used to specify the dimensions that were left
 *             unspecified when the operand was added to the
 *             model. All other properties of the type must be the
 *             same as specified in the model. If the type is the same
 *             as specified when the model was built, NULL can be
 *             passed. Neither the {@link ANeuralNetworksOperandType}
 *             nor the dimensions it points to need to outlive the call
 *             to {@link ANeuralNetworksExecution_setOutputFromMemory}.
 *             Since NNAPI feature level 3, the output operand can have unspecified
 *             dimensions or rank to be deduced dynamically during the execution.
 *             However, the user must provide a large enough memory. The user
 *             can retrieve the output dimensional information after the execution
 *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
 *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
 * @param memory The memory where the data is to be stored.
 * @param offset This specifies the location of the data within the memory.
 *               The offset is in bytes from the start of memory.
 * @param length The size of the data value in bytes plus any end padding.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the output.
 */
int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
                                                 const ANeuralNetworksOperandType* type,
                                                 const ANeuralNetworksMemory* memory, size_t offset,
                                                 size_t length) __NNAPI_INTRODUCED_IN(27);
1675 
/**
 * Schedule asynchronous evaluation of the execution.
 *
 * <p>Schedules asynchronous evaluation of the execution. Once the execution
 * has completed and the outputs are ready to be consumed, the returned event
 * will be signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that
 * event.
 * </p>
 *
 * ANeuralNetworksEvent_wait must be called to recuperate the resources used
 * by the execution.
 *
 * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,
 * and the execution is not able to complete before the timeout duration is
 * exceeded, then execution may be aborted, in which case
 * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned through
 * {@link ANeuralNetworksExecution_startCompute} or
 * {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
 * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
 * is lower than {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout
 * duration hint will be ignored.
 *
 * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
 * the condition model does not output false within the loop timeout duration,
 * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
 * will be returned through {@link ANeuralNetworksEvent_wait} on the event
 * object.
 *
 * If the device can detect before the execution has started that the execution
 * will not complete within the timeout duration, the device may choose to skip
 * the execution and instead return ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}.
 *
 * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
 * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
 * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
 * the execution is in the completed state.
 *
 * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
 *
 * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
 * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
 * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
 * asynchronous execution with dependencies.
 *
 * Available since NNAPI feature level 1.
 *
 * @param execution The execution to be scheduled and executed.
 * @param event The event that will be signaled on completion. event is set to
 *              NULL if there's an error. The event must be destroyed with
 *              {@link ANeuralNetworksEvent_free} once it is no longer needed.
 *
 * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
 */
int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution,
                                          ANeuralNetworksEvent** event) __NNAPI_INTRODUCED_IN(27);
1730 
/**
 * Set the maximum expected duration of the specified execution.
 *
 * If the device is not able to complete the execution within the specified
 * duration, the execution may be aborted. The timeout duration begins at a
 * call to one of:
 * - {@link ANeuralNetworksExecution_burstCompute}
 * - {@link ANeuralNetworksExecution_compute}
 * - {@link ANeuralNetworksExecution_startCompute}
 * - {@link ANeuralNetworksExecution_startComputeWithDependencies}
 *
 * This timeout duration acts as a hint to drivers, and can be used to both free
 * up compute resources within the driver and return control back to the
 * application quicker than is possible without the hint. It enables drivers
 * that are able to estimate how long an execution will take to abort the
 * execution before it has even started if the driver believes the execution
 * cannot be completed within the timeout duration. Similarly, it enables
 * drivers to abort an ongoing execution if it is taking too long. However, this
 * call does not guarantee that the execution will complete or abort within the
 * timeout duration.
 *
 * By default (i.e., unless ANeuralNetworksExecution_setTimeout is called),
 * the timeout duration for execution is considered infinite.
 *
 * The {@link ANeuralNetworksExecution} must have been created from an
 * {@link ANeuralNetworksCompilation} which in turn was created from
 * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
 * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the
 * device has a feature level reported by
 * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
 * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will
 * be ignored.
 *
 * This function may only be invoked when the execution is in the preparation state.
 *
 * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
 *
 * @param execution The execution to be modified.
 * @param duration The maximum amount of time in nanoseconds that is expected to
 *     be spent executing a model. If this duration is exceeded, the execution
 *     may be aborted. If set to 0, the timeout duration is considered infinite.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 *         ANEURALNETWORKS_BAD_DATA if the execution's compilation was not
 *         created with {@link ANeuralNetworksCompilation_createForDevices}
 *         targeting exactly one device (see above).
 *
 * Available since NNAPI feature level 4.
 */
int ANeuralNetworksExecution_setTimeout(ANeuralNetworksExecution* execution, uint64_t duration)
        __NNAPI_INTRODUCED_IN(30);
1779 
/**
 * Set the maximum duration of WHILE loops in the specified execution.
 *
 * This is a fuzzy per-loop timeout intended to prevent infinite loops.
 *
 * If a WHILE loop condition model does not output false within the specified
 * duration, the execution will be aborted.
 *
 * See {@link ANeuralNetworks_getDefaultLoopTimeout} and
 * {@link ANeuralNetworks_getMaximumLoopTimeout} for the default
 * and maximum timeout values. If this function is not called, the default
 * value returned by {@link ANeuralNetworks_getDefaultLoopTimeout} applies.
 *
 * This function may only be invoked when the execution is in the preparation state.
 *
 * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
 *
 * @param execution The execution to be modified.
 * @param duration The maximum amount of time in nanoseconds that can be spent
 *     executing a WHILE loop. If the specified duration value exceeds the value
 *     produced by {@link ANeuralNetworks_getMaximumLoopTimeout}, it will be
 *     overridden by that value.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 *         ANEURALNETWORKS_BAD_STATE if execution has started.
 *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
 *
 * Available since NNAPI feature level 4.
 */
int ANeuralNetworksExecution_setLoopTimeout(ANeuralNetworksExecution* execution, uint64_t duration)
        __NNAPI_INTRODUCED_IN(30);
1810 
1811 /**
1812  * Get the default timeout value for WHILE loops.
1813  *
1814  * @return The default timeout value in nanoseconds.
1815  *
1816  * Available since NNAPI feature level 4.
1817  */
1818 uint64_t ANeuralNetworks_getDefaultLoopTimeout() __NNAPI_INTRODUCED_IN(30);
1819 
1820 /**
1821  * Get the maximum timeout value for WHILE loops.
1822  *
1823  * @return The maximum timeout value in nanoseconds.
1824  *
1825  * Available since NNAPI feature level 4.
1826  */
1827 uint64_t ANeuralNetworks_getMaximumLoopTimeout() __NNAPI_INTRODUCED_IN(30);
1828 
/**
 * Waits until the execution completes.
 *
 * More than one thread can wait on an event. When the execution completes,
 * all threads will be released.
 *
 * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution
 * corresponding to this event, and the execution is not able to complete
 * before the duration is exceeded, the execution may be aborted, in which case
 * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned here.
 *
 * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
 * the condition model does not output false within the loop timeout duration,
 * the execution will be aborted, and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
 * will be returned here.
 *
 * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
 *
 * Available since NNAPI feature level 1.
 *
 * @param event The event that will be signaled on completion.
 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
 *         ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot
 *         be properly mapped.
 *         ANEURALNETWORKS_MISSED_DEADLINE_* if the execution was aborted
 *         because an execution or loop timeout was exceeded (see above).
 */
int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __NNAPI_INTRODUCED_IN(27);
1855 
1856 /**
1857  * Destroys the event.
1858  *
1859  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
1860  *
1861  * Available since NNAPI feature level 1.
1862  *
1863  * @param event The event object to be destroyed. Passing NULL is acceptable and
1864  *              results in no operation.
1865  */
1866 void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __NNAPI_INTRODUCED_IN(27);
1867 
1868 /**
1869  * Create a {@link ANeuralNetworksEvent} from a sync_fence file descriptor.
1870  *
1871  * The newly created ANeuralNetworksEvent does not take ownership of the provided sync_fence_fd,
1872  * it will instead dup the provided sync_fence_fd and own the duplicate.
1873  *
1874  * @param sync_fence_fd The sync_fence file descriptor.
1875  * @param event The newly created object or NULL if unsuccessful.
1876  *
1877  * @return ANEURALNETWORKS_NO_ERROR if successful.
1878  *
1879  * Available since NNAPI feature level 4.
1880  */
1881 int ANeuralNetworksEvent_createFromSyncFenceFd(int sync_fence_fd, ANeuralNetworksEvent** event)
1882         __NNAPI_INTRODUCED_IN(30);
1883 
1884 /**
1885  * Get sync_fence file descriptor from the event.
1886  *
1887  * If the ANeuralNetworksEvent is not backed by a sync fence, the sync_fence_fd
1888  * will be set to -1, and ANEURALNETWORKS_BAD_DATA will be returned.
1889  *
1890  * See {@link ANeuralNetworksEvent_createFromSyncFenceFd} and
1891  * {@link ANeuralNetworksExecution_startComputeWithDependencies} to see how to create
1892  * an event backed by a sync fence.
1893  *
1894  * The user takes ownership of the returned fd, and must close the returned file descriptor when
1895  * it is no longer needed.
1896  *
1897  * @param event An event that is backed by a sync fence.
1898  * @param sync_fence_fd The sync_fence file descriptor. The file descriptor will
1899  *                      be set to -1 if there is an error.
1900  *
1901  * @return ANEURALNETWORKS_NO_ERROR if successful.
1902  *
1903  * Available since NNAPI feature level 4.
1904  */
1905 int ANeuralNetworksEvent_getSyncFenceFd(const ANeuralNetworksEvent* event, int* sync_fence_fd)
1906         __NNAPI_INTRODUCED_IN(30);
1907 
1908 /**
1909  * Schedule asynchronous evaluation of the execution with dependencies.
1910  *
1911  * The execution will wait for all the depending events to be signaled before
1912  * starting the evaluation. Once the execution has completed and the outputs
1913  * are ready to be consumed, the returned event will be signaled. Depending on which
1914  * devices are handling the execution, the event could be backed by a sync fence.
1915  * Use {@link ANeuralNetworksEvent_wait} to wait for that event.
1916  *
 * ANeuralNetworksEvent_wait must be called to reclaim the resources used
1918  * by the execution.
1919  *
1920  * If parts of the execution are scheduled on devices that do not support fenced execution,
1921  * the function call may wait for such parts to finish before returning.
1922  *
1923  * The function will return an error if any of the events in dependencies is already in a bad
1924  * state. After the execution is scheduled, if any of the events in dependencies does not complete
1925  * normally, the execution will fail, and {@link ANeuralNetworksEvent_wait} on the returned
1926  * event will return an error.
1927  *
1928  * The function will return an error if any of the execution outputs has a tensor operand type
1929  * that is not fully specified.
1930  *
1931  * The function can be passed a timeout duration in nanoseconds. This timeout
1932  * duration acts as a hint to drivers in the same way that the timeout durations
1933  * in {@link ANeuralNetworksCompilation_setTimeout} and {@link
1934  * ANeuralNetworksExecution_setTimeout} act as hints to drivers. The duration
1935  * begins when all waitFor sync fences have been signaled, and can be used
1936  * together with {@link ANeuralNetworksExecution_setTimeout} which specifies the
1937  * maximum timeout duration beginning at the call to
1938  * {@link ANeuralNetworksExecution_startComputeWithDependencies}.
1939  * If the duration is non-zero, the {@link ANeuralNetworksExecution} must have been created
1940  * from an {@link ANeuralNetworksCompilation} which in turn was created from
1941  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
1942  * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If either
1943  * the timeout duration from {@link ANeuralNetworksExecution_setTimeout} or the
1944  * timeout duration passed to this call is exceeded, the execution may be
1945  * aborted, in which case ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be
1946  * returned through {@link ANeuralNetworksExecution_startComputeWithDependencies}
1947  * or {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
1948  * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
1949  * is lower than {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration
1950  * hints will be ignored.
1951  *
1952  * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
1953  * the condition model does not output false within the loop timeout duration,
1954  * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
1955  * will be returned through {@link ANeuralNetworksEvent_wait} on the event
1956  * object.
1957  *
1958  * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
1959  * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
1960  * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
1961  * the execution is in the completed state.
1962  *
1963  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1964  *
1965  * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
1966  * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
1967  * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
1968  *
1969  * @param execution The execution to be scheduled and executed.
1970  * @param dependencies A set of depending events. The actual evaluation will not start
1971  *                     until all the events are signaled.
1972  * @param num_dependencies The number of events in the dependencies set.
1973  * @param duration The maximum amount of time in nanoseconds that is expected to
1974  *                 be spent executing the model after all dependencies are
1975  *                 signaled. If set to 0, the timeout duration is considered
1976  *                 infinite.
1977  * @param event The event that will be signaled on completion. event is set to
1978  *              NULL if there's an error.
1979  *
1980  * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
1981  *
1982  * Available since NNAPI feature level 4.
1983  */
1984 int ANeuralNetworksExecution_startComputeWithDependencies(
1985         ANeuralNetworksExecution* execution, const ANeuralNetworksEvent* const* dependencies,
1986         uint32_t num_dependencies, uint64_t duration, ANeuralNetworksEvent** event)
1987         __NNAPI_INTRODUCED_IN(30);
1988 
1989 /**
1990  * Get the NNAPI runtime feature level.
1991  *
1992  * Since API level 31 (NNAPI feature level 5), the NNAPI runtime (libneuralnetworks.so) and its
1993  * API specification can be updated between Android API releases.
1994  *
1995  * On Android devices with API level 31 and newer, for NNAPI runtime feature discovery,
1996  * the NNAPI runtime feature level must be used instead of the Android device API level.
1997  *
1998  * On Android devices with API level 30 and older, the Android API level of the Android
1999  * device must be used for NNAPI runtime feature discovery. Enum values in
2000  * {@link FeatureLevelCode} from feature level 1 to 5 have their corresponding Android
2001  * API levels listed in their documentation, and each such enum value equals the corresponding
2002  * API level. This allows using the Android API level as the feature level.
2003  * This mapping between enum value and Android API level does not exist for feature levels
2004  * after NNAPI feature level 5 and API levels after S (31).
2005  *
2006  * Example usage:
2007  * int device_api_level = android_get_device_api_level();
2008  * int64_t runtime_feature_level = (device_api_level < __ANDROID_API_S__) ?
2009  *                                  device_api_level : ANeuralNetworks_getRuntimeFeatureLevel();
2010  *
2011  * Runtime feature level is closely related to NNAPI device feature level
2012  * ({@link ANeuralNetworksDevice_getFeatureLevel}), which indicates an NNAPI device feature level
2013  * (the most advanced NNAPI specification and features that the driver implements).
2014  * This function expresses NNAPI runtime feature level, which indicates the most advanced
2015  * NNAPI specification and features the runtime implements. An NNAPI device feature level is
2016  * always less than or equal to the runtime feature level.
2017  *
2018  * This function returns a {@link FeatureLevelCode} enum value,
2019  * which is the NNAPI specification version that this NNAPI runtime implements.
2020  * It is NOT an Android API level.
2021  *
2022  * Available since NNAPI feature level 5.
2023  */
2024 int64_t ANeuralNetworks_getRuntimeFeatureLevel() __NNAPI_INTRODUCED_IN(31);
2025 
2026 /**
2027  * Specifies whether the {@link ANeuralNetworksExecution} is able to accept padded input and output
2028  * buffers and memory objects.
2029  *
2030  * By default, the input and output buffers and memory objects of {@link ANeuralNetworksExecution}
2031  * do not allow padding.
2032  *
2033  * Setting the execution to accept padded input and output buffers and memory objects enables the
2034  * length argument of {@link ANeuralNetworksExecution_setInput},
2035  * {@link ANeuralNetworksExecution_setInputFromMemory}, {@link ANeuralNetworksExecution_setOutput},
2036  * and {@link ANeuralNetworksExecution_setOutputFromMemory} to be greater than the raw size of the
2037  * operand (i.e. the size of an element multiplied by the number of elements). The extra bytes
2038  * at the end of the buffer or memory region may be used by the driver to access data in chunks,
2039  * for efficiency.
2040  *
2041  * This method must not be called after {@link ANeuralNetworksExecution_setInput},
2042  * {@link ANeuralNetworksExecution_setInputFromMemory}, {@link ANeuralNetworksExecution_setOutput},
2043  * or {@link ANeuralNetworksExecution_setOutputFromMemory}.
2044  *
2045  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2046  *
2047  * @param execution The execution to be modified.
2048  * @param enable 'true' if the execution is to be able to accept padded input and output buffers
2049  *               and memory objects, 'false' if not.
2050  *
2051  * @return ANEURALNETWORKS_NO_ERROR if successful.
2052  *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
2053  *         ANEURALNETWORKS_BAD_STATE if {@link ANeuralNetworksExecution_setInput},
2054  *         {@link ANeuralNetworksExecution_setInputFromMemory},
2055  *         {@link ANeuralNetworksExecution_setOutput}, or
2056  *         {@link ANeuralNetworksExecution_setOutputFromMemory} has been called on the execution.
2057  *
2058  * Available since NNAPI feature level 5.
2059  */
2060 int ANeuralNetworksExecution_enableInputAndOutputPadding(ANeuralNetworksExecution* execution,
2061                                                          bool enable) __NNAPI_INTRODUCED_IN(31);
2062 
2063 /**
2064  * Get the preferred buffer and memory alignment of an input to an execution created from a
2065  * particular compilation.
2066  *
2067  * The user may use the returned alignment value to guide the layout of the input buffer or memory
2068  * pool. To achieve the best performance, make sure the address of the buffer passed in
2069  * {@link ANeuralNetworksExecution_setInput}, or the offset value passed in
 * {@link ANeuralNetworksExecution_setInputFromMemory}, is a multiple of the preferred alignment
2071  * value of the same input. A driver may choose to allocate a separate buffer and do memory copying
2072  * if the provided buffer or memory does not satisfy the preferred alignment.
2073  *
2074  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2075  *
2076  * @param compilation The compilation object. It must already have been finished by calling
2077  *                    {@link ANeuralNetworksCompilation_finish}.
2078  * @param index The index of the input argument we are referencing from the compilation. It is
2079  *              an index into the inputs list passed to
2080  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2081  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2082  * @param alignment The returned preferred alignment in bytes. It will be a power of 2.
2083  *
2084  * @return ANEURALNETWORKS_NO_ERROR if successful.
2085  *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
2086  *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2087  *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2088  *
2089  * Available since NNAPI feature level 5.
2090  */
2091 int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
2092         const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment)
2093         __NNAPI_INTRODUCED_IN(31);
2094 
2095 /**
2096  * Get the preferred buffer and memory end padding of an input to an execution created from a
2097  * particular compilation.
2098  *
2099  * The user may use the returned padding value to guide the layout of the input buffer or memory
2100  * pool. To achieve the best performance, make sure the length value passed in
2101  * {@link ANeuralNetworksExecution_setInput} or
2102  * {@link ANeuralNetworksExecution_setInputFromMemory} is greater than or equal to the raw size of
 * the input (i.e. the size of an element multiplied by the number of elements) rounded up to
 * a multiple of the preferred padding value of the same input. A driver may choose to allocate a
2105  * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
2106  * the preferred padding.
2107  *
2108  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2109  * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
2110  * {@link ANeuralNetworksExecution_setInput}, and
2111  * {@link ANeuralNetworksExecution_setInputFromMemory} for information on passing
2112  * input buffer or memory padding to the driver.
2113  *
2114  * @param compilation The compilation object. It must already have been finished by calling
2115  *                    {@link ANeuralNetworksCompilation_finish}.
2116  * @param index The index of the input argument we are referencing from the compilation. It is
2117  *              an index into the inputs list passed to
2118  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2119  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2120  * @param padding The returned preferred padding in bytes. It will be a power of 2.
2121  *
2122  * @return ANEURALNETWORKS_NO_ERROR if successful.
2123  *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
2124  *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2125  *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2126  *
2127  * Available since NNAPI feature level 5.
2128  */
2129 int ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
2130         const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding)
2131         __NNAPI_INTRODUCED_IN(31);
2132 
2133 /**
2134  * Get the preferred buffer and memory alignment of an output to an execution created from a
2135  * particular compilation.
2136  *
2137  * The user may use the returned alignment value to guide the layout of the output buffer or memory
2138  * pool. To achieve the best performance, make sure the address of the buffer passed in
2139  * {@link ANeuralNetworksExecution_setOutput}, or the offset value passed in
 * {@link ANeuralNetworksExecution_setOutputFromMemory}, is a multiple of the preferred alignment
2141  * value of the same output. A driver may choose to allocate a separate buffer and do memory copying
2142  * if the provided buffer or memory does not satisfy the preferred alignment.
2143  *
2144  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2145  *
2146  * @param compilation The compilation object. It must already have been finished by calling
2147  *                    {@link ANeuralNetworksCompilation_finish}.
2148  * @param index The index of the output argument we are referencing from the compilation. It is
2149  *              an index into the outputs list passed to
2150  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2151  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
 * @param alignment The returned preferred alignment in bytes. It will be a power of 2.
2153  *
2154  * @return ANEURALNETWORKS_NO_ERROR if successful.
2155  *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
2156  *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2157  *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2158  *
2159  * Available since NNAPI feature level 5.
2160  */
2161 int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
2162         const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment)
2163         __NNAPI_INTRODUCED_IN(31);
2164 
2165 /**
2166  * Get the preferred memory end padding of an output to an execution created from a particular
2167  * compilation.
2168  *
2169  * The user may use the returned padding value to guide the layout of the output buffer or memory
2170  * pool. To achieve the best performance, make sure the length value passed in
2171  * {@link ANeuralNetworksExecution_setOutput} or
2172  * {@link ANeuralNetworksExecution_setOutputFromMemory} is greater than or equal to the raw size of
 * the output (i.e. the size of an element multiplied by the number of elements) rounded up to
 * a multiple of the preferred padding value of the same output. A driver may choose to allocate a
2175  * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
2176  * the preferred padding.
2177  *
2178  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2179  * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
2180  * {@link ANeuralNetworksExecution_setOutput}, and
2181  * {@link ANeuralNetworksExecution_setOutputFromMemory} for information on passing
2182  * output buffer or memory padding to the driver.
2183  *
2184  * @param compilation The compilation object. It must already have been finished by calling
2185  *                    {@link ANeuralNetworksCompilation_finish}.
2186  * @param index The index of the output argument we are referencing from the compilation. It is
2187  *              an index into the outputs list passed to
2188  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2189  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
 * @param padding The returned preferred padding in bytes. It will be a power of 2.
2191  *
2192  * @return ANEURALNETWORKS_NO_ERROR if successful.
2193  *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
2194  *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2195  *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2196  *
2197  * Available since NNAPI feature level 5.
2198  */
2199 int ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
2200         const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding)
2201         __NNAPI_INTRODUCED_IN(31);
2202 
2203 /**
2204  * Specifies whether the {@link ANeuralNetworksExecution} can be reused for multiple computations.
2205  *
2206  * By default, the {@link ANeuralNetworksExecution} is not reusable.
2207  *
2208  * Setting the execution to be reusable enables multiple computations to be scheduled and evaluated
2209  * on the same execution sequentially, either by means of
2210  * {@link ANeuralNetworksExecution_burstCompute}, {@link ANeuralNetworksExecution_compute},
2211  * {@link ANeuralNetworksExecution_startCompute} or
2212  * {@link ANeuralNetworksExecution_startComputeWithDependencies}: The application may schedule and
2213  * evaluate a computation again from the completed state of a reusable execution.
2214  *
2215  * This function may only be invoked when the execution is in the preparation state.
2216  *
2217  * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
2218  *
2219  * @param execution The execution to be modified.
2220  * @param reusable 'true' if the execution is to be reusable, 'false' if not.
2221  *
2222  * @return ANEURALNETWORKS_NO_ERROR if successful.
2223  *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
2224  *         ANEURALNETWORKS_BAD_STATE if the execution is not in the preparation state.
2225  *
2226  * Available since NNAPI feature level 5.
2227  */
2228 int ANeuralNetworksExecution_setReusable(ANeuralNetworksExecution* execution, bool reusable)
2229         __NNAPI_INTRODUCED_IN(31);
2230 
2231 __END_DECLS
2232 
2233 #endif  // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
2234 
2235 #undef __NNAPI_INTRODUCED_IN
2236 
2237 /** @} */
2238