/aosp12/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/

bidirectional_sequence_lstm.mod.py
    212  proj_clip = Float32Scalar("proj_clip", 0.0)
    266  activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_cifg_peephole.mod.py
    212  proj_clip = Float32Scalar("proj_clip", 0.0)
    266  activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_float16_batch_major.mod.py
    212  proj_clip = Float16Scalar("proj_clip", 0.0)
    266  activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_float16_batch_major_merge_outputs.mod.py
    212  proj_clip = Float16Scalar("proj_clip", 0.0)
    266  activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_merge_outputs.mod.py
    211  proj_clip = Float32Scalar("proj_clip", 0.0)
    265  activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_aux_input.mod.py
    214  proj_clip = Float32Scalar("proj_clip", 0.0)
    268  activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_float16_batch_major_aux_input.mod.py
    215  proj_clip = Float16Scalar("proj_clip", 0.0)
    269  activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_norm_fw_output.mod.py
    213  proj_clip = Float32Scalar("proj_clip", 0.0)
    267  activation, cell_clip, proj_clip, merge_outputs, time_major,
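
In each of these specs, proj_clip is declared with the test generator's Float32Scalar or Float16Scalar helper and passed to the BIDIRECTIONAL_SEQUENCE_LSTM operation together with activation, cell_clip, merge_outputs and time_major. For orientation, here is a minimal C++ sketch of the same idea expressed through the public NNAPI C API from <android/NeuralNetworks.h> (which is not among the files listed above): proj_clip is just a constant FLOAT32 scalar operand, and 0.0f means clipping is disabled. The rest of the operation's operand list is deliberately elided.

    // Sketch only: adds and sets a proj_clip-style scalar operand via the
    // public NNAPI C API. The surrounding BIDIRECTIONAL_SEQUENCE_LSTM model
    // (its other operands and the operation itself) is omitted.
    #include <android/NeuralNetworks.h>
    #include <cstdio>

    int main() {
        ANeuralNetworksModel* model = nullptr;
        if (ANeuralNetworksModel_create(&model) != ANEURALNETWORKS_NO_ERROR) return 1;

        // proj_clip is a plain FLOAT32 scalar operand (rank 0, no quantization).
        ANeuralNetworksOperandType scalarFloat32;
        scalarFloat32.type = ANEURALNETWORKS_FLOAT32;
        scalarFloat32.dimensionCount = 0;
        scalarFloat32.dimensions = nullptr;
        scalarFloat32.scale = 0.0f;
        scalarFloat32.zeroPoint = 0;
        ANeuralNetworksModel_addOperand(model, &scalarFloat32);  // first operand -> index 0

        const float proj_clip = 0.0f;  // 0.0 disables projection clipping
        ANeuralNetworksModel_setOperandValue(model, /*index=*/0, &proj_clip, sizeof(proj_clip));

        // ... remaining operands and ANeuralNetworksModel_addOperation() would go here.
        ANeuralNetworksModel_free(model);
        std::printf("proj_clip operand added\n");
        return 0;
    }

The float16 variants above declare the same scalar with Float16Scalar, i.e. as an ANEURALNETWORKS_FLOAT16 scalar operand rather than FLOAT32.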
/aosp12/packages/modules/NeuralNetworks/common/operations/

LayerNormLSTMTest.cpp
    88   bool use_projection_bias, float cell_clip, float proj_clip,  [in LayerNormLSTMOpModel(), argument]
    98   proj_clip_(proj_clip) {  [in LayerNormLSTMOpModel()]

LSTM.cpp
    94   params_.proj_clip = getScalarDataWithDefault<float>(projClipOperand, 0.0f);  [in LSTMCell()]
    98   params_.proj_clip =  [in LSTMCell()]
    156  NN_CHECK(params->proj_clip >= 0);  [in CheckInputTensorDimensions()]
    979  if (params.proj_clip > 0.0) {  [in LSTMStep()]
    981  params.proj_clip);  [in LSTMStep()]

LSTMTest.cpp
    80   bool use_projection_bias, float cell_clip, float proj_clip,  [in LSTMOpModel(), argument]
    90   proj_clip_(proj_clip) {  [in LSTMOpModel()]

LSTM.h
    35   float proj_clip;  [member]

BidirectionalSequenceLSTM.cpp
    178  params_.proj_clip = getScalarDataWithDefault<float>(projOperand, 0.0f);  [in BidirectionalSequenceLSTM()]
    182  params_.proj_clip =  [in BidirectionalSequenceLSTM()]

UnidirectionalSequenceLSTM.cpp
    108  params.proj_clip = static_cast<float>(context->getInputValue<T>(kProjClipParam));  [in getLSTMParams()]
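
Taken together, the LSTM.cpp hits describe how proj_clip is enforced at runtime: it is read from a scalar operand with a 0.0f default (lines 94/98), validated to be non-negative (line 156), and, when strictly positive, used in LSTMStep() to clamp the projected output state to [-proj_clip, proj_clip] (lines 979-981). The standalone C++ sketch below reproduces only that clamping step; ClipProjection is a hypothetical helper written for this illustration, not the routine the AOSP implementation actually calls.

    // Minimal sketch of projection clipping as applied when proj_clip > 0.0:
    // every element of the projected output state is clamped to
    // [-proj_clip, proj_clip]; a proj_clip of 0.0 leaves the values untouched.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    void ClipProjection(std::vector<float>& output_state, float proj_clip) {
        if (proj_clip <= 0.0f) return;  // 0.0 means clipping is disabled
        for (float& v : output_state) {
            v = std::min(std::max(v, -proj_clip), proj_clip);
        }
    }

    int main() {
        std::vector<float> output_state = {-3.5f, -0.2f, 0.7f, 4.1f};
        ClipProjection(output_state, /*proj_clip=*/1.0f);
        for (float v : output_state) std::printf("%g ", v);  // prints: -1 -0.2 0.7 1
        std::printf("\n");
        return 0;
    }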
/aosp12/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/

bidirectional_sequence_lstm.mod.py
    240  proj_clip = Float32Scalar("proj_clip", 0.0)
    296  proj_clip,

bidirectional_sequence_lstm_state_output.mod.py
    228  proj_clip = Float32Scalar("proj_clip", 0.0)
    284  proj_clip,
/aosp12/hardware/interfaces/neuralnetworks/1.0/

types.hal
    988   * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.

/aosp12/hardware/interfaces/neuralnetworks/1.2/

types.hal
    1281  * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    2496  * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    4543  * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.

/aosp12/hardware/interfaces/neuralnetworks/1.3/

types.hal
    1280  * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    2599  * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    4805  * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.

/aosp12/packages/modules/NeuralNetworks/tools/api/

types.spec
    1718  * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
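
Every types.hal and types.spec hit above documents the same contract for the projection clipping threshold: values coming out of the projection layer are bound to [-proj_clip, proj_clip], and a value of 0.0 disables clipping. The sketch below shows one way a runtime could resolve the operand consistently with that contract and with the defaulting and validation seen in LSTM.cpp (getScalarDataWithDefault<float>(..., 0.0f) and NN_CHECK(params->proj_clip >= 0)); ResolveProjClip and its std::optional argument are hypothetical and exist only for this illustration.

    // Sketch only: resolve a proj_clip value from an operand that may be
    // absent, defaulting to 0.0f (clipping disabled) and rejecting negatives.
    #include <cstdio>
    #include <optional>
    #include <stdexcept>

    float ResolveProjClip(std::optional<float> operand_value) {
        const float proj_clip = operand_value.value_or(0.0f);  // missing -> disabled
        if (proj_clip < 0.0f) {
            throw std::invalid_argument("proj_clip must be >= 0");
        }
        return proj_clip;
    }

    int main() {
        std::printf("%g\n", ResolveProjClip(std::nullopt));  // 0 (clipping disabled)
        std::printf("%g\n", ResolveProjClip(2.5f));          // 2.5
        return 0;
    }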