// Excerpt from RefQLstmWorkload::Execute(); statements between the fragments shown
// here (for example, re-creation of encoders/decoders after quantization scale changes) are omitted.
const DataType& internalType = armnn::DataType::QSymmS16;

// Infer numBatches, inputSize, outputSize and numUnits from the input and state shapes
const uint32_t numBatches = inputShape[0];
const uint32_t inputSize  = inputShape[1];
const uint32_t outputSize = outputStateInShape[1];
const uint32_t numUnits   = cellStateInShape[1];
// Decoders over the run-time input tensors (input, outputStateIn, cellStateIn) and
// decoders/encoders over the output tensors (outputStateOut, cellStateOut, output).
// The TensorInfo and data arguments passed to MakeDecoder/MakeEncoder are elided here.
std::unique_ptr<Decoder<float>> inputDecoder          = MakeDecoder<float>(/* ... */);
std::unique_ptr<Decoder<float>> outputStateInDecoder  = MakeDecoder<float>(/* ... */);
std::unique_ptr<Decoder<float>> cellStateInDecoder    = MakeDecoder<float>(/* ... */);

std::unique_ptr<Decoder<float>> outputStateOutDecoder = MakeDecoder<float>(/* ... */);
std::unique_ptr<Decoder<float>> cellStateOutDecoder   = MakeDecoder<float>(/* ... */);
std::unique_ptr<Decoder<float>> outputDecoder         = MakeDecoder<float>(/* ... */);

std::unique_ptr<Encoder<float>> outputStateOutEncoder = MakeEncoder<float>(/* ... */);
std::unique_ptr<Encoder<float>> cellStateOutEncoder   = MakeEncoder<float>(/* ... */);
std::unique_ptr<Encoder<float>> outputEncoder         = MakeEncoder<float>(/* ... */);
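The Decoder<float> and Encoder<float> objects above are what let the rest of the listing do all of its arithmetic in the float domain: a decoder dequantizes each element as it is read and an encoder requantizes as it is written, using the scale (and, for asymmetric types, the zero point) from the associated TensorInfo. A minimal self-contained sketch of that affine rule for the QSymmS16 buffers used here (illustrative only; the function names below are not ArmNN APIs):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Illustrative QSymmS16 (de)quantization: real = scale * quantized (symmetric, offset == 0).
    // Asymmetric types such as the QAsymmS8 hidden state additionally subtract a zero point.
    float   DequantizeQSymm16(int16_t q, float scale) { return scale * static_cast<float>(q); }
    int16_t QuantizeQSymm16(float r, float scale)
    {
        float q = std::round(r / scale);                  // round to the nearest representable step
        q = std::max(-32768.0f, std::min(32767.0f, q));   // clamp to the int16_t range
        return static_cast<int16_t>(q);
    }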
// Decoders for the mandatory input-to-gate and recurrent-to-gate weights
std::unique_ptr<Decoder<float>> inputToForgetWeightsDecoder = MakeDecoder<float>(
        m_InputToForgetWeightsTensor->GetTensorInfo(), m_InputToForgetWeightsTensor->GetConstTensor<void>());
std::unique_ptr<Decoder<float>> inputToCellWeightsDecoder = MakeDecoder<float>(
        m_InputToCellWeightsTensor->GetTensorInfo(), m_InputToCellWeightsTensor->GetConstTensor<void>());
std::unique_ptr<Decoder<float>> inputToOutputWeightsDecoder = MakeDecoder<float>(
        m_InputToOutputWeightsTensor->GetTensorInfo(), m_InputToOutputWeightsTensor->GetConstTensor<void>());

std::unique_ptr<Decoder<float>> recurrentToForgetWeightsDecoder = MakeDecoder<float>(
        m_RecurrentToForgetWeightsTensor->GetTensorInfo(),
        m_RecurrentToForgetWeightsTensor->GetConstTensor<void>());
std::unique_ptr<Decoder<float>> recurrentToCellWeightsDecoder = MakeDecoder<float>(
        m_RecurrentToCellWeightsTensor->GetTensorInfo(), m_RecurrentToCellWeightsTensor->GetConstTensor<void>());
std::unique_ptr<Decoder<float>> recurrentToOutputWeightsDecoder = MakeDecoder<float>(
        m_RecurrentToOutputWeightsTensor->GetTensorInfo(),
        m_RecurrentToOutputWeightsTensor->GetConstTensor<void>());
// Optional CIFG params
std::unique_ptr<Decoder<float>> inputToInputWeightsDecoder;
std::unique_ptr<Decoder<float>> recurrentToInputWeightsDecoder;
std::unique_ptr<Decoder<float>> inputGateBiasDecoder;

// Optional peephole params
std::unique_ptr<Decoder<float>> cellToInputWeightsDecoder;
std::unique_ptr<Decoder<float>> cellToForgetWeightsDecoder;
std::unique_ptr<Decoder<float>> cellToOutputWeightsDecoder;

// Optional projection params
std::unique_ptr<Decoder<float>> projectionWeightsDecoder;
std::unique_ptr<Decoder<float>> projectionBiasDecoder;

// Optional layer normalisation params
std::unique_ptr<Decoder<float>> inputLayerNormWeightsDecoder;
std::unique_ptr<Decoder<float>> forgetLayerNormWeightsDecoder;
std::unique_ptr<Decoder<float>> cellLayerNormWeightsDecoder;
std::unique_ptr<Decoder<float>> outputLayerNormWeightsDecoder;

// Gate bias decoders
std::unique_ptr<Decoder<float>> forgetGateBiasDecoder;
std::unique_ptr<Decoder<float>> cellGateBiasDecoder;
std::unique_ptr<Decoder<float>> outputGateBiasDecoder;
// Int16 scratch vectors for the internal gate and state data, viewed through the
// decoders/encoders created below
const uint32_t stateTensorSize = numBatches * numUnits;
std::vector<int16_t> inputGateData(stateTensorSize);
std::vector<int16_t> cellGateData(stateTensorSize);
std::vector<int16_t> forgetGateData(stateTensorSize);
std::vector<int16_t> outputGateData(stateTensorSize);
std::vector<int32_t> hiddenStateData(stateTensorSize);
std::vector<int16_t> outputInt16Data(numBatches * outputSize);
// TensorInfos describing the quantization of the internal scratch buffers
armnn::TensorInfo inputGateInfo({numBatches, numUnits}, armnn::DataType::QSymmS16, m_Data.m_Parameters.m_InputIntermediateScale, 0);
armnn::TensorInfo cellGateInfo({numBatches, numUnits}, armnn::DataType::QSymmS16, m_Data.m_Parameters.m_CellIntermediateScale, 0);
armnn::TensorInfo forgetGateInfo({numBatches, numUnits}, armnn::DataType::QSymmS16, m_Data.m_Parameters.m_ForgetIntermediateScale, 0);
armnn::TensorInfo outputGateInfo({numBatches, numUnits}, armnn::DataType::QSymmS16, m_Data.m_Parameters.m_OutputIntermediateScale, 0);
armnn::TensorInfo hiddenStateInfo({numBatches, numUnits}, armnn::DataType::QAsymmS8,
                                  m_Data.m_Parameters.m_HiddenStateScale, m_Data.m_Parameters.m_HiddenStateZeroPoint);
armnn::TensorInfo outputInt16Info({numBatches, outputSize}, armnn::DataType::QSymmS16,
                                  outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset());
// Decoders and encoders viewing the scratch buffers through the infos above
std::unique_ptr<Decoder<float>> inputGateDecoder   = MakeDecoder<float>(inputGateInfo, inputGateData.data());
std::unique_ptr<Decoder<float>> cellGateDecoder    = MakeDecoder<float>(cellGateInfo, cellGateData.data());
std::unique_ptr<Decoder<float>> forgetGateDecoder  = MakeDecoder<float>(forgetGateInfo, forgetGateData.data());
std::unique_ptr<Decoder<float>> outputGateDecoder  = MakeDecoder<float>(outputGateInfo, outputGateData.data());
std::unique_ptr<Decoder<float>> hiddenStateDecoder = MakeDecoder<float>(hiddenStateInfo, hiddenStateData.data());

std::unique_ptr<Encoder<float>> inputGateEncoder   = MakeEncoder<float>(inputGateInfo, inputGateData.data());
std::unique_ptr<Encoder<float>> cellGateEncoder    = MakeEncoder<float>(cellGateInfo, cellGateData.data());
std::unique_ptr<Encoder<float>> forgetGateEncoder  = MakeEncoder<float>(forgetGateInfo, forgetGateData.data());
std::unique_ptr<Encoder<float>> outputGateEncoder  = MakeEncoder<float>(outputGateInfo, outputGateData.data());
std::unique_ptr<Encoder<float>> hiddenStateEncoder = MakeEncoder<float>(hiddenStateInfo, hiddenStateData.data());

// Int16 view of the output, used to accumulate the projection without overflowing
std::unique_ptr<Decoder<float>> outputInt16Decoder = MakeDecoder<float>(outputInt16Info, outputInt16Data.data());
std::unique_ptr<Encoder<float>> outputInt16Encoder = MakeEncoder<float>(outputInt16Info, outputInt16Data.data());
// Decoders for the optional CIFG and peephole weights, created only when the
// corresponding feature is enabled
if (!cifgEnabled)
{
    inputToInputWeightsDecoder = MakeDecoder<float>(
            m_InputToInputWeightsTensor->GetTensorInfo(), m_InputToInputWeightsTensor->GetConstTensor<void>());
    recurrentToInputWeightsDecoder = MakeDecoder<float>(m_RecurrentToInputWeightsTensor->GetTensorInfo(),
                                                        m_RecurrentToInputWeightsTensor->GetConstTensor<void>());
}

if (peepholeEnabled)
{
    if (!cifgEnabled)
    {
        cellToInputWeightsDecoder = MakeDecoder<float>(
                m_CellToInputWeightsTensor->GetTensorInfo(), m_CellToInputWeightsTensor->GetConstTensor<void>());
    }
    cellToForgetWeightsDecoder = MakeDecoder<float>(
            m_CellToForgetWeightsTensor->GetTensorInfo(), m_CellToForgetWeightsTensor->GetConstTensor<void>());
    cellToOutputWeightsDecoder = MakeDecoder<float>(
            m_CellToOutputWeightsTensor->GetTensorInfo(), m_CellToOutputWeightsTensor->GetConstTensor<void>());
}
if (projectionEnabled)
{
    projectionWeightsDecoder = MakeDecoder<float>(
            m_ProjectionWeightsTensor->GetTensorInfo(), m_ProjectionWeightsTensor->GetConstTensor<void>());
    if (m_ProjectionBiasTensor)
    {
        projectionBiasDecoder = MakeDecoder<float>(
                m_ProjectionBiasTensor->GetTensorInfo(), m_ProjectionBiasTensor->GetConstTensor<void>());
    }
}
if (layerNormEnabled)
{
    if (!cifgEnabled)
    {
        inputLayerNormWeightsDecoder = MakeDecoder<float>(m_InputLayerNormWeightsTensor->GetTensorInfo(),
                                                          m_InputLayerNormWeightsTensor->GetConstTensor<void>());

        // The Signed32 input gate bias is decoded with a scale of (layer-norm weight scale / 1024)
        armnn::TensorInfo inputGateBiasTensorInfo({outputSize}, armnn::DataType::Signed32,
                m_InputLayerNormWeightsTensor->GetTensorInfo().GetQuantizationScale() / 1024, 0);
        inputGateBiasDecoder = MakeDecoder<float>(
                inputGateBiasTensorInfo, m_InputGateBiasTensor->GetConstTensor<void>());
    }

    forgetLayerNormWeightsDecoder = MakeDecoder<float>(
            m_ForgetLayerNormWeightsTensor->GetTensorInfo(),
            m_ForgetLayerNormWeightsTensor->GetConstTensor<void>());
    cellLayerNormWeightsDecoder = MakeDecoder<float>(
            m_CellLayerNormWeightsTensor->GetTensorInfo(), m_CellLayerNormWeightsTensor->GetConstTensor<void>());
    outputLayerNormWeightsDecoder = MakeDecoder<float>(
            m_OutputLayerNormWeightsTensor->GetTensorInfo(),
            m_OutputLayerNormWeightsTensor->GetConstTensor<void>());

    // The remaining gate biases follow the same (layer-norm weight scale / 1024) convention
    armnn::TensorInfo forgetGateBiasTensorInfo({outputSize}, armnn::DataType::Signed32,
            m_ForgetLayerNormWeightsTensor->GetTensorInfo().GetQuantizationScale() / 1024, 0);
    forgetGateBiasDecoder = MakeDecoder<float>(
            forgetGateBiasTensorInfo, m_ForgetGateBiasTensor->GetConstTensor<void>());

    armnn::TensorInfo cellGateBiasTensorInfo({outputSize}, armnn::DataType::Signed32,
            m_CellLayerNormWeightsTensor->GetTensorInfo().GetQuantizationScale() / 1024, 0);
    cellGateBiasDecoder = MakeDecoder<float>(
            cellGateBiasTensorInfo, m_CellBiasTensor->GetConstTensor<void>());

    armnn::TensorInfo outputGateBiasTensorInfo({outputSize}, armnn::DataType::Signed32,
            m_OutputLayerNormWeightsTensor->GetTensorInfo().GetQuantizationScale() / 1024, 0);
    outputGateBiasDecoder = MakeDecoder<float>(
            outputGateBiasTensorInfo, m_OutputGateBiasTensor->GetConstTensor<void>());
}
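Two quantization conventions recur in the layer-norm branch above: the Signed32 gate biases are decoded with a scale of (layer-norm weight scale / 1024), and after the bias add the gate scratch buffers are re-encoded with a fixed scale of 1.f / 4096, i.e. roughly Q3.12 fixed point in an int16_t. A small worked example with an assumed weight scale (the numeric values are hypothetical, chosen only to make the arithmetic concrete):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const float layerNormWeightScale = 0.00048828125f;              // assumed scale (2^-11), illustration only
        const float biasScale            = layerNormWeightScale / 1024; // matches the "/ 1024" convention above

        const int32_t rawBias     = 4096;                               // hypothetical quantized bias element
        const float   decodedBias = static_cast<float>(rawBias) * biasScale;

        const float gateScale = 1.0f / 4096;                            // post-bias-add gate resolution (2^-12)

        std::printf("decoded bias = %g, gate resolution = %g\n", decodedBias, gateScale);
        return 0;
    }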
// Initialise the gate and hidden-state scratch buffers
if (!cifgEnabled)
{
    ZeroVector(*inputGateEncoder, stateTensorSize);
}
ZeroVector(*forgetGateEncoder, stateTensorSize);
ZeroVector(*cellGateEncoder, stateTensorSize);
ZeroVector(*outputGateEncoder, stateTensorSize);
ZeroVector(*hiddenStateEncoder, stateTensorSize);

// Input weights * input, accumulated into each gate
if (!cifgEnabled)
{
    MatrixBatchVectorMultiplyAccumulate(*inputToInputWeightsDecoder, numUnits, inputSize, *inputDecoder, numBatches, *inputGateEncoder);
}
MatrixBatchVectorMultiplyAccumulate(*inputToForgetWeightsDecoder, numUnits, inputSize, *inputDecoder, numBatches, *forgetGateEncoder);
MatrixBatchVectorMultiplyAccumulate(*inputToCellWeightsDecoder, numUnits, inputSize, *inputDecoder, numBatches, *cellGateEncoder);
MatrixBatchVectorMultiplyAccumulate(*inputToOutputWeightsDecoder, numUnits, inputSize, *inputDecoder, numBatches, *outputGateEncoder);

// Recurrent weights * outputStateIn, accumulated into each gate
if (!cifgEnabled)
{
    MatrixBatchVectorMultiplyAccumulate(*recurrentToInputWeightsDecoder, numUnits, outputSize, *outputStateInDecoder, numBatches, *inputGateEncoder);
}
MatrixBatchVectorMultiplyAccumulate(*recurrentToForgetWeightsDecoder, numUnits, outputSize, *outputStateInDecoder, numBatches, *forgetGateEncoder);
MatrixBatchVectorMultiplyAccumulate(*recurrentToCellWeightsDecoder, numUnits, outputSize, *outputStateInDecoder, numBatches, *cellGateEncoder);
MatrixBatchVectorMultiplyAccumulate(*recurrentToOutputWeightsDecoder, numUnits, outputSize, *outputStateInDecoder, numBatches, *outputGateEncoder);
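Each MatrixBatchVectorMultiplyAccumulate call above accumulates one weights-times-vector product into a gate buffer, once per batch: numUnits x inputSize weights against the input, then numUnits x outputSize weights against outputStateIn. A plain-float sketch of that semantics, written against the signature listed at the end of this page (an illustration of the behaviour, not the ArmNN implementation, which reads and writes through Decoder/Encoder objects):

    #include <cstdint>
    #include <vector>

    // out[b][r] += sum_c matrix[r][c] * vec[b][c]
    // matrix: mRows x mCols (row-major), vec: nBatch x mCols, out: nBatch x mRows
    void MatrixBatchVectorMultiplyAccumulateRef(const std::vector<float>& matrix,
                                                uint32_t mRows, uint32_t mCols,
                                                const std::vector<float>& vec,
                                                uint32_t nBatch,
                                                std::vector<float>& out)
    {
        for (uint32_t b = 0; b < nBatch; ++b)
        {
            for (uint32_t r = 0; r < mRows; ++r)
            {
                float acc = 0.0f;
                for (uint32_t c = 0; c < mCols; ++c)
                {
                    acc += matrix[r * mCols + c] * vec[b * mCols + c];
                }
                out[b * mRows + r] += acc;   // accumulate, matching the gate buffers above
            }
        }
    }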
// Input gate (calculated only when CIFG is disabled)
if (!cifgEnabled)
{
    if (peepholeEnabled)
    {
        VectorBatchVectorCwiseProductAccumulate(*cellToInputWeightsDecoder, numUnits, *cellStateInDecoder, numBatches, *inputGateEncoder);
    }
    if (layerNormEnabled)
    {
        inputGateInfo.SetQuantizationScale(inputInfo.GetQuantizationScale() *
                                           m_InputLayerNormWeightsTensor->GetTensorInfo().GetQuantizationScale() *
                                           1024);
        MeanStddevNormalization(*inputGateDecoder, *inputGateEncoder, numUnits, numBatches, m_LayerNormEpsilon);
        VectorBatchVectorCwiseProduct(*inputLayerNormWeightsDecoder, numUnits, *inputGateDecoder, numBatches, *inputGateEncoder);
        inputGateInfo.SetQuantizationScale(1.f / 4096);
        VectorBatchVectorAdd(*inputGateBiasDecoder, numUnits, *inputGateDecoder, numBatches, *inputGateEncoder);
    }
    // Input gate sigmoid
    Activation(*inputGateDecoder, *inputGateEncoder,
               TensorInfo({numUnits, numBatches}, internalType),
               ActivationFunction::Sigmoid, 0, 0);
}
// Forget gate
if (peepholeEnabled)
{
    VectorBatchVectorCwiseProductAccumulate(*cellToForgetWeightsDecoder, numUnits, *cellStateInDecoder, numBatches, *forgetGateEncoder);
}
if (layerNormEnabled)
{
    forgetGateInfo.SetQuantizationScale(inputInfo.GetQuantizationScale() *
                                        m_ForgetLayerNormWeightsTensor->GetTensorInfo().GetQuantizationScale() *
                                        1024);
    MeanStddevNormalization(*forgetGateDecoder, *forgetGateEncoder, numUnits, numBatches, m_LayerNormEpsilon);
    VectorBatchVectorCwiseProduct(*forgetLayerNormWeightsDecoder, numUnits, *forgetGateDecoder, numBatches, *forgetGateEncoder);
    forgetGateInfo.SetQuantizationScale(1.f / 4096);
    VectorBatchVectorAdd(*forgetGateBiasDecoder, numUnits, *forgetGateDecoder, numBatches, *forgetGateEncoder);
}
// Forget gate sigmoid
Activation(*forgetGateDecoder, *forgetGateEncoder,
           TensorInfo({numUnits, numBatches}, internalType),
           ActivationFunction::Sigmoid, 0, 0);
// Cell (modulation) gate
if (layerNormEnabled)
{
    cellGateInfo.SetQuantizationScale(inputInfo.GetQuantizationScale() *
                                      m_CellLayerNormWeightsTensor->GetTensorInfo().GetQuantizationScale() *
                                      1024);
    MeanStddevNormalization(*cellGateDecoder, *cellGateEncoder, numUnits, numBatches, m_LayerNormEpsilon);
    VectorBatchVectorCwiseProduct(*cellLayerNormWeightsDecoder, numUnits, *cellGateDecoder, numBatches, *cellGateEncoder);
    cellGateInfo.SetQuantizationScale(1.f / 4096);
    VectorBatchVectorAdd(*cellGateBiasDecoder, numUnits, *cellGateDecoder, numBatches, *cellGateEncoder);
}
// Cell gate tanh
Activation(*cellGateDecoder, *cellGateEncoder,
           TensorInfo({numUnits, numBatches}, internalType),
           ActivationFunction::TanH, 0, 0);
// New cell state = forgetGate .* cellStateIn + inputGate .* cellGate
// (under CIFG the input gate is 1 - forgetGate)
VectorVectorCwiseProduct(*forgetGateDecoder, *cellStateInDecoder, stateTensorSize, *cellStateOutEncoder);
if (cifgEnabled)
{
    Sub1Vector(*forgetGateDecoder, stateTensorSize, *forgetGateEncoder);
    VectorVectorCwiseProductAccumulate(*cellGateDecoder, *forgetGateDecoder, stateTensorSize, *cellStateOutEncoder);
}
else
{
    VectorVectorCwiseProductAccumulate(*cellGateDecoder, *inputGateDecoder, stateTensorSize, *cellStateOutEncoder);
}
// Optional cell clipping
if (m_Data.m_Parameters.m_CellClip > 0.0)
{
    ClipVector(*cellStateOutDecoder, stateTensorSize, m_Data.m_Parameters.m_CellClip, *cellStateOutEncoder);
}
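The block above is the standard LSTM cell-state update, cellStateOut = forgetGate * cellStateIn + inputGate * cellGate (elementwise), with the CIFG variant deriving the input gate as 1 - forgetGate via Sub1Vector, followed by optional clipping. A float sketch of the same update (illustrative; the helper and parameter names here are hypothetical):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    void CellStateUpdateRef(const std::vector<float>& forgetGate,
                            const std::vector<float>& inputGate,   // ignored when cifgEnabled is true
                            const std::vector<float>& cellGate,
                            const std::vector<float>& cellStateIn,
                            std::vector<float>& cellStateOut,
                            bool cifgEnabled, float cellClip)
    {
        for (std::size_t i = 0; i < cellStateIn.size(); ++i)
        {
            const float in = cifgEnabled ? (1.0f - forgetGate[i]) : inputGate[i];
            float c = forgetGate[i] * cellStateIn[i] + in * cellGate[i];
            if (cellClip > 0.0f)
            {
                c = std::max(-cellClip, std::min(cellClip, c));   // optional cell clipping
            }
            cellStateOut[i] = c;
        }
    }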
// Output gate
if (peepholeEnabled)
{
    VectorBatchVectorCwiseProductAccumulate(*cellToOutputWeightsDecoder, numUnits, *cellStateOutDecoder, numBatches, *outputGateEncoder);
}
if (layerNormEnabled)
{
    outputGateInfo.SetQuantizationScale(inputInfo.GetQuantizationScale() *
                                        m_OutputLayerNormWeightsTensor->GetTensorInfo().GetQuantizationScale() *
                                        1024);
    MeanStddevNormalization(*outputGateDecoder, *outputGateEncoder, numUnits, numBatches, m_LayerNormEpsilon);
    VectorBatchVectorCwiseProduct(*outputLayerNormWeightsDecoder, numUnits, *outputGateDecoder,
                                  numBatches, *outputGateEncoder);
    outputGateInfo.SetQuantizationScale(1.f / 4096);
    VectorBatchVectorAdd(*outputGateBiasDecoder, numUnits, *outputGateDecoder, numBatches, *outputGateEncoder);
}
// Output gate sigmoid
Activation(*outputGateDecoder, *outputGateEncoder,
           TensorInfo({numUnits, numBatches}, internalType),
           ActivationFunction::Sigmoid, 0, 0);

// tanh(cellStateOut), written into the cellGate scratch buffer
Activation(*cellStateOutDecoder, *cellGateEncoder,
           TensorInfo({numUnits, numBatches}, internalType),
           ActivationFunction::TanH, 0, 0);
// Hidden state = outputGate .* tanh(cellStateOut)
VectorVectorCwiseProduct(*outputGateDecoder, *cellGateDecoder, stateTensorSize, *hiddenStateEncoder);

// Projection (or a plain copy of the hidden state) into the output tensor
if (m_Data.m_Parameters.m_ProjectionEnabled)
{
    if (m_ProjectionBiasTensor)
    {
        VectorBatchVectorAssign(*projectionBiasDecoder, outputSize, numBatches, *outputInt16Encoder);
    }
    MatrixBatchVectorMultiplyAccumulate(*projectionWeightsDecoder, outputSize, numUnits, *hiddenStateDecoder, numBatches, *outputInt16Encoder);
    CopyVector(*outputInt16Decoder, numBatches * outputSize, *outputEncoder);

    if (m_Data.m_Parameters.m_ProjectionClip > 0.0)
    {
        ClipVector(*outputDecoder, numBatches * outputSize, m_Data.m_Parameters.m_ProjectionClip, *outputEncoder);
    }
}
else
{
    CopyVector(*hiddenStateDecoder, numBatches * outputSize, *outputEncoder);
}

// outputStateOut is a copy of the output
CopyVector(*outputDecoder, numBatches * outputSize, *outputStateOutEncoder);
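The final steps compute the hidden state as outputGate * tanh(cellStateOut) (elementwise), then either run it through the projection (weights, optional bias, optional clipping) or copy it directly to the output, and mirror the output into outputStateOut. A compact float sketch of that output path, excluding the outputStateOut copy (illustrative only; names and layouts are assumptions, not the ArmNN code):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    void OutputPathRef(const std::vector<float>& outputGate,     // [numBatches * numUnits]
                       const std::vector<float>& cellStateOut,   // [numBatches * numUnits]
                       const std::vector<float>& projWeights,    // [outputSize * numUnits], empty if no projection
                       const std::vector<float>& projBias,       // [outputSize], may be empty
                       uint32_t numBatches, uint32_t numUnits, uint32_t outputSize,
                       float projClip,
                       std::vector<float>& output)               // [numBatches * outputSize]
    {
        std::vector<float> hidden(numBatches * numUnits);
        for (std::size_t i = 0; i < hidden.size(); ++i)
        {
            hidden[i] = outputGate[i] * std::tanh(cellStateOut[i]);
        }

        if (!projWeights.empty())
        {
            for (uint32_t b = 0; b < numBatches; ++b)
            {
                for (uint32_t o = 0; o < outputSize; ++o)
                {
                    float acc = projBias.empty() ? 0.0f : projBias[o];
                    for (uint32_t u = 0; u < numUnits; ++u)
                    {
                        acc += projWeights[o * numUnits + u] * hidden[b * numUnits + u];
                    }
                    if (projClip > 0.0f)
                    {
                        acc = std::max(-projClip, std::min(projClip, acc));
                    }
                    output[b * outputSize + o] = acc;
                }
            }
        }
        else
        {
            output = hidden;   // without projection, outputSize == numUnits
        }
    }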
Symbols referenced by this listing (from the generated cross-references):

LstmUtils helpers:
    void ZeroVector(armnn::Encoder<float>& vector, uint32_t vSize)
    void CopyVector(armnn::Decoder<float>& vector, uint32_t vSize, armnn::Encoder<float>& outResult)
    void ClipVector(armnn::Decoder<float>& vector, uint32_t vSize, float absLimit, armnn::Encoder<float>& outResult)
    void Sub1Vector(armnn::Decoder<float>& vector, uint32_t vSize, armnn::Encoder<float>& result)
    void VectorVectorCwiseProduct(armnn::Decoder<float>& vector1, armnn::Decoder<float>& vector2, uint32_t vSize, armnn::Encoder<float>& outResult)
    void VectorVectorCwiseProductAccumulate(armnn::Decoder<float>& vector1, armnn::Decoder<float>& vector2, uint32_t vSize, armnn::Encoder<float>& outResult)
    void VectorBatchVectorCwiseProduct(armnn::Decoder<float>& vector, uint32_t vSize, armnn::Decoder<float>& batchVector, uint32_t nBatch, armnn::Encoder<float>& outResult)
    void VectorBatchVectorCwiseProductAccumulate(armnn::Decoder<float>& vector, uint32_t vSize, armnn::Decoder<float>& batchVector, uint32_t nBatch, armnn::Encoder<float>& outResult)
    void VectorBatchVectorAdd(armnn::Decoder<float>& vector, uint32_t vSize, armnn::Decoder<float>& batchVector, uint32_t nBatch, armnn::Encoder<float>& outResult)
    void VectorBatchVectorAssign(armnn::Decoder<float>& vector, uint32_t vSize, uint32_t nBatch, armnn::Encoder<float>& outBatchVector)
    void MatrixBatchVectorMultiplyAccumulate(armnn::Decoder<float>& matrix, uint32_t mRows, uint32_t mCols, armnn::Decoder<float>& vector, uint32_t nBatch, armnn::Encoder<float>& outResult)
    void MeanStddevNormalization(armnn::Decoder<float>& input_vector, armnn::Encoder<float>& output_vector, uint32_t v_size, uint32_t n_batch, float normalization_epsilon)

Workload members and utilities:
    std::unique_ptr<armnn::ScopedTensorHandle> AssignScopedTensorHandle(const armnn::ConstTensorHandle* ptr)
    #define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label): creates a profiling event that uses GetGuid() and GetName() from the calling class
    QLstmQueueDescriptor m_Data
    RefBaseWorkload(const QLstmQueueDescriptor& descriptor, const WorkloadInfo& info)
    RefQLstmWorkload(const QLstmQueueDescriptor& descriptor, const WorkloadInfo& info)
    void Execute() const override
    float GetQuantizationScale() const
    const TensorShape& GetShape() const
    int32_t GetQuantizationOffset() const
    std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data = nullptr)
    std::unique_ptr<Encoder<T>> MakeEncoder(const TensorInfo& info, void* data = nullptr)
    armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)

Descriptor parameters:
    float m_InputIntermediateScale: input intermediate quantization scale
    bool m_PeepholeEnabled: enable/disable peephole
    bool m_LayerNormEnabled: enable/disable layer normalization
    bool m_ProjectionEnabled: enable/disable the projection layer
    bool m_CifgEnabled: enable/disable CIFG (coupled input and forget gate)
    LayerDescriptor m_Parameters: contains information about TensorInfos of a layer
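Several helpers in the list above broadcast a single length-vSize vector across a batch of vectors; the listing uses them for the peephole contributions, the layer-norm weights and the gate biases. A float sketch of the semantics implied by two of those signatures, VectorBatchVectorCwiseProductAccumulate and VectorBatchVectorAdd (inferred from the signatures only, so treat it as an assumption rather than a transcription of LstmUtils):

    #include <cstdint>
    #include <vector>

    // out[b][i] += vec[i] * batchVec[b][i]
    void VectorBatchVectorCwiseProductAccumulateRef(const std::vector<float>& vec, uint32_t vSize,
                                                    const std::vector<float>& batchVec, uint32_t nBatch,
                                                    std::vector<float>& out)
    {
        for (uint32_t b = 0; b < nBatch; ++b)
        {
            for (uint32_t i = 0; i < vSize; ++i)
            {
                out[b * vSize + i] += vec[i] * batchVec[b * vSize + i];
            }
        }
    }

    // out[b][i] = batchVec[b][i] + vec[i]
    void VectorBatchVectorAddRef(const std::vector<float>& vec, uint32_t vSize,
                                 const std::vector<float>& batchVec, uint32_t nBatch,
                                 std::vector<float>& out)
    {
        for (uint32_t b = 0; b < nBatch; ++b)
        {
            for (uint32_t i = 0; i < vSize; ++i)
            {
                out[b * vSize + i] = batchVec[b * vSize + i] + vec[i];
            }
        }
    }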