#include <neon/test/NeonWorkloadFactoryHelper.hpp>
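// CalcAclAxis converts an ArmNN axis index (counted from the outermost dimension)
// into the Compute Library convention, which counts from the innermost dimension:
// for a 3D tensor, CalcAclAxis(3, 0) == 2.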
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int axis)
{
    return (numDimensions - axis) - 1;
}
using namespace armcomputetensorutils;
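// This workload lowers the unidirectional sequence LSTM onto Compute Library
// primitives: an optional NEPermute (batch major -> time major), an NESplit that
// slices the sequence into per-time-step tensors, one NELSTMLayer per time step,
// an NEConcatenateLayer that reassembles the outputs and, for batch-major data,
// a final NEPermute back to the original layout.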
unsigned int maxTime = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];
unsigned int batchSize = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];
unsigned int inputSize = inputLayerShape[2];
unsigned int outputSize = outputLayerShape[2];
unsigned int numUnits = cellStateLayerShape[1];

const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});
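// If the input arrives batch major, permute it to time major
// ({maxTime, batchSize, inputSize}) so the sequence can be split along axis 0.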
if (!m_Data.m_Parameters.m_TimeMajor)
{
    std::unique_ptr<arm_compute::NEPermute> layer(new arm_compute::NEPermute());

    TensorInfo permuteOutInfo = inputInfo;
    permuteOutInfo.SetShape(timeMajorShapeInput);
    BuildArmComputeTensor(m_PermuteFirstOut, permuteOutInfo);
    armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_PermuteFirstOut);

    layer->configure(&input, &m_PermuteFirstOut, arm_compute::PermutationVector(0U, 2U, 1U));
    m_Permute1.reset(layer.release());
}
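// Create one 2D working tensor per time step: one set receives the split input
// slices, the other collects the per-step LSTM outputs for the later concatenation.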
for (unsigned int i = 0; i < maxTime; ++i)
{
    arm_compute::Tensor splitter_out;
    arm_compute::Tensor concat_in;

    auto splitterTensorInfo = inputInfo;
    auto concatTensorInfo = outputInfo;
    splitterTensorInfo.SetShape({batchSize, inputSize});
    concatTensorInfo.SetShape({batchSize, outputSize});
    BuildArmComputeTensor(splitter_out, splitterTensorInfo);
    BuildArmComputeTensor(concat_in, concatTensorInfo);

    armcomputetensorutils::InitialiseArmComputeTensorEmpty(splitter_out);
    armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_in);

    m_SplitterOutputsTensors.push_back(std::move(splitter_out));
    m_ConcatInputsTensors.push_back(std::move(concat_in));
}
for (unsigned int i = 0; i < maxTime; ++i)
{
    m_SplitterOutputs.push_back(&m_SplitterOutputsTensors[i]);
    m_ConcatInputs.push_back(&m_ConcatInputsTensors[i]);
}
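// Describe the split views: each of the maxTime views covers a single time step
// of size {1, batchSize, inputSize}, offset along dimension 0.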
unsigned int numberDimensions = 3;
unsigned int dimension = 0;

unsigned int splitterDimSizes[3] = {1, batchSize, inputSize};
for (unsigned int outputIdx = 0u; outputIdx < maxTime; ++outputIdx)
{
    splitterDesc.SetViewOriginCoord(outputIdx, dimension, splitterDimSizes[dimension] * outputIdx);
    for (unsigned int dimIdx = 0u; dimIdx < numberDimensions; ++dimIdx)
    {
        splitterDesc.SetViewSize(outputIdx, dimIdx, splitterDimSizes[dimIdx]);
    }
}
std::set<unsigned int> splitAxis = ComputeSplitAxis(splitterDesc, timeMajorShapeInput);

std::unique_ptr<arm_compute::NESplit> split_layer(new arm_compute::NESplit());
if (!m_Data.m_Parameters.m_TimeMajor)
{
    split_layer->configure(&m_PermuteFirstOut, m_SplitterOutputs, aclAxisSplit);
}
else
{
    split_layer->configure(&input, m_SplitterOutputs, aclAxisSplit);
}

split_layer->prepare();
m_Splitter.reset(split_layer.release());
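// Wrap the constant weights and biases in ACL tensors. The mandatory tensors are
// always built; CIFG, projection, peephole and layer-normalization tensors are
// only built when the corresponding descriptor flag is enabled.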
arm_compute::LSTMParams<arm_compute::ITensor> lstm_param;

m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());
if (!m_Data.m_Parameters.m_CifgEnabled)
{
    m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

    m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

    m_CellToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    if (m_Data.m_CellToInputWeights != nullptr)
    {
        BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
    }

    m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());

    lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
                               m_RecurrentToInputWeightsTensor.get(),
                               m_Data.m_CellToInputWeights ? m_CellToInputWeightsTensor.get() : nullptr,
                               m_InputGateBiasTensor.get());
}
if (m_Data.m_Parameters.m_ProjectionEnabled)
{
    m_ProjectionWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());

    m_ProjectionBiasTensor = std::make_unique<arm_compute::Tensor>();
    if (m_Data.m_ProjectionBias != nullptr)
    {
        BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
    }

    lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
                                     m_Data.m_ProjectionBias ? m_ProjectionBiasTensor.get() : nullptr);
}
if (m_Data.m_Parameters.m_PeepholeEnabled)
{
    m_CellToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());

    m_CellToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());

    lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
}
if (m_Data.m_Parameters.m_LayerNormEnabled)
{
    m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
    }

    m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());

    m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());

    m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());

    auto inputNormWeightTensor = m_Data.m_Parameters.m_CifgEnabled ? nullptr : m_InputLayerNormWeightsTensor.get();
    lstm_param.set_layer_normalization_params(inputNormWeightTensor,
                                              m_ForgetLayerNormWeightsTensor.get(),
                                              m_CellLayerNormWeightsTensor.get(),
                                              m_OutputLayerNormWeightsTensor.get());
}
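// The LSTM scratch buffer holds the intermediate gate calculations: three gates'
// worth of data (numUnits * 3) when CIFG couples the input and forget gates,
// otherwise four (numUnits * 4).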
m_ScratchBuffer = std::make_unique<arm_compute::Tensor>();
if (m_Data.m_Parameters.m_CifgEnabled)
{
    BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 3}, armnnDataType));
}
else
{
    BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 4}, armnnDataType));
}
float cell_threshold = m_Data.m_Parameters.m_ClippingThresCell;
float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;

arm_compute::ActivationLayerInfo activationLayerInfo =
    ConvertLstmActivationFuncToAclLayerInfo(m_Data.m_Parameters.m_ActivationFunc);
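// Instantiate one NELSTMLayer per time step. Every step shares the same weight
// tensors and scratch buffer; only the input and output tensors differ per step.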
for (unsigned int i = 0; i != maxTime; ++i)
{
    // Select the LSTM input/output tensors for this time step, depending on
    // time-major vs batch-major layout and on whether maxTime == 1.
    arm_compute::ITensor* outputLSTM;
    arm_compute::ITensor* inputLSTM;

    if (maxTime == 1 && m_Data.m_Parameters.m_TimeMajor)
    {
        // Single time-major step: feed the workload input/output directly,
        // shrinking their shapes to 2D for the LSTM layer.
        TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
        TensorShape outputShapeShrink({outputShape[1], outputShape[2]});

        auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
        auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);

        input.info()->set_tensor_shape(acl_input_shape_shrink);
        inputLSTM = const_cast<arm_compute::ITensor*>(&input);

        output.info()->set_tensor_shape(acl_output_shape_shrink);
        outputLSTM = &output;
    }
    else if (maxTime == 1 && !m_Data.m_Parameters.m_TimeMajor)
    {
        // Single batch-major step: use the permuted input and the first concat input.
        TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
        auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
        m_PermuteFirstOut.info()->set_tensor_shape(acl_input_shape_shrink);
        inputLSTM = &m_PermuteFirstOut;

        outputLSTM = const_cast<arm_compute::ITensor*>(m_ConcatInputs[i]);
    }
    else
    {
        // Multiple time steps: use the split outputs and the concat inputs.
        inputLSTM = m_SplitterOutputs[i];
        outputLSTM = const_cast<arm_compute::ITensor*>(m_ConcatInputs[i]);
    }
    std::unique_ptr<arm_compute::NELSTMLayer> lstm_layer(new arm_compute::NELSTMLayer());
    lstm_layer->configure(inputLSTM,
                          m_InputToForgetWeightsTensor.get(),
                          m_InputToCellWeightsTensor.get(),
                          m_InputToOutputWeightsTensor.get(),
                          m_RecurrentToForgetWeightsTensor.get(),
                          m_RecurrentToCellWeightsTensor.get(),
                          m_RecurrentToOutputWeightsTensor.get(),
                          m_ForgetGateBiasTensor.get(),
                          m_CellBiasTensor.get(),
                          m_OutputGateBiasTensor.get(),
                          m_ScratchBuffer.get(),
                          projection_threshold);

    m_Layers.emplace_back(std::move(lstm_layer));
}
armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
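// The same feature flags that guarded the tensor allocation above guard the
// initialisation of the optional CIFG, projection, peephole and
// layer-normalization tensors with their constant data.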
if (!m_Data.m_Parameters.m_CifgEnabled)
if (m_Data.m_CellToInputWeights != nullptr)

if (m_Data.m_Parameters.m_ProjectionEnabled)
if (m_Data.m_ProjectionBias != nullptr)

if (m_Data.m_Parameters.m_PeepholeEnabled)

if (m_Data.m_Parameters.m_LayerNormEnabled)
if (!m_Data.m_Parameters.m_CifgEnabled)
for (uint32_t i = 0; i < m_Layers.size(); ++i)
{
    m_Layers[i]->prepare();
}
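// Expand each per-step output back to 3D ({1, batchSize, outputSize}) and
// concatenate along the time axis; batch-major outputs are permuted back afterwards.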
TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});

for (unsigned int i = 0; i < maxTime; ++i)
{
    m_ConcatInputs[i]->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
}

for (unsigned int inputIdx = 0u; inputIdx < maxTime; ++inputIdx)

m_Concat.reset(new arm_compute::NEConcatenateLayer());

if (!m_Data.m_Parameters.m_TimeMajor)
{
    TensorInfo concatOutputTensorInfo = outputInfo;
    concatOutputTensorInfo.SetShape(timeMajorShapeOutput);
    BuildArmComputeTensor(concat_out, concatOutputTensorInfo);
    armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_out);

    m_Concat->configure(m_ConcatInputs, &concat_out, aclAxisConcat);
}
else
{
    m_Concat->configure(m_ConcatInputs, &output, aclAxisConcat);
}
if (!m_Data.m_Parameters.m_TimeMajor)
{
    output.info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandBatchMajor));
}
else
{
    output.info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
}
if (!m_Data.m_Parameters.m_TimeMajor)
{
    std::unique_ptr<arm_compute::NEPermute> layer(new arm_compute::NEPermute());
    if (maxTime != 1)
    {
        layer->configure(&concat_out, &output, arm_compute::PermutationVector(0U, 2U, 1U));
    }
    else
    {
        layer->configure(m_ConcatInputs[0], &output, arm_compute::PermutationVector(0U, 2U, 1U));
    }
    m_Permute2.reset(layer.release());
}
for (uint32_t i = 0; i < m_Layers.size(); ++i)
525 "Unidirectional Sequence LSTM layer validate status failed.");
unsigned int maxTime = descriptor.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];
unsigned int batchSize = descriptor.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];
unsigned int inputSize = inputLayerShape[2];
unsigned int outputSize = outputLayerShape[2];

const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);

arm_compute::TensorInfo aclPermuteOutInfo = armcomputetensorutils::BuildArmComputeTensorInfo(permuteOutInfo);

statusPermute1 = arm_compute::NEPermute::validate(&aclInputInfo,
                                                  &aclPermuteOutInfo,
                                                  arm_compute::PermutationVector(0U, 2U, 1U));
std::vector<arm_compute::TensorInfo> splitterOutputsTensorInfos;
std::vector<arm_compute::TensorInfo> concatInputsTensorInfos;
std::vector<arm_compute::ITensorInfo*> splitterOutputsTensorInfosPtr;
std::vector<const arm_compute::ITensorInfo*> concatInputsTensorInfosPtr;
splitterOutputsTensorInfos.reserve(maxTime);
concatInputsTensorInfos.reserve(maxTime);
for (unsigned int i = 0; i < maxTime; ++i)
{
    arm_compute::TensorInfo splitter_out;
    arm_compute::TensorInfo concat_in;

    splitterTensorInfo.SetShape({batchSize, inputSize});
    concatTensorInfo.SetShape({batchSize, outputSize});

    arm_compute::TensorInfo aclSplitterTensorInfo
        = armcomputetensorutils::BuildArmComputeTensorInfo(splitterTensorInfo);
    arm_compute::TensorInfo aclConcatTensorInfo
        = armcomputetensorutils::BuildArmComputeTensorInfo(concatTensorInfo);

    splitterOutputsTensorInfos.emplace_back(aclSplitterTensorInfo);
    concatInputsTensorInfos.emplace_back(aclConcatTensorInfo);
    splitterOutputsTensorInfosPtr.emplace_back(&splitterOutputsTensorInfos[i]);
    concatInputsTensorInfosPtr.emplace_back(&concatInputsTensorInfos[i]);
}
unsigned int numberDimensions = 3;
unsigned int dimension = 0;
unsigned int aclAxisSplit = CalcAclAxis(numberDimensions, dimension);
if (!descriptor.m_TimeMajor)
{
    statusSplit = arm_compute::NESplit::validate(&aclPermuteOutInfo, splitterOutputsTensorInfosPtr, aclAxisSplit);
}
else
{
    statusSplit = arm_compute::NESplit::validate(&aclInputInfo, splitterOutputsTensorInfosPtr, aclAxisSplit);
}
arm_compute::LSTMParams<arm_compute::ITensorInfo> lstm_params_info;

unsigned int numUnits = cellStateIn.GetShape()[1];
unsigned int scratchBufferFactor = 4;
if (descriptor.m_CifgEnabled)
{
    // With CIFG the scratch buffer only needs three gates' worth of data.
    scratchBufferFactor = 3;
}
const arm_compute::TensorInfo aclOutputStateInInfo = BuildArmComputeTensorInfo(outputStateIn);
const arm_compute::TensorInfo aclCellStateInInfo = BuildArmComputeTensorInfo(cellStateIn);
const arm_compute::TensorInfo aclScratchBufferInfo = BuildArmComputeTensorInfo(scratchBuffer);
const arm_compute::TensorInfo aclOutputStateOutInfo = BuildArmComputeTensorInfo(outputStateOut);
const arm_compute::TensorInfo aclCellStateOutInfo = BuildArmComputeTensorInfo(cellStateOut);
const arm_compute::TensorInfo aclInputToForgetWeightsInfo
    = BuildArmComputeTensorInfo(paramsInfo.GetInputToForgetWeights());
const arm_compute::TensorInfo aclInputToCellWeightsInfo
    = BuildArmComputeTensorInfo(paramsInfo.GetInputToCellWeights());
const arm_compute::TensorInfo aclInputToOutputWeightsInfo
    = BuildArmComputeTensorInfo(paramsInfo.GetInputToOutputWeights());
const arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo
    = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToForgetWeights());
const arm_compute::TensorInfo aclRecurrentToCellWeightsInfo
    = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToCellWeights());
const arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo
    = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToOutputWeights());
const arm_compute::TensorInfo aclForgetGateBiasInfo
    = BuildArmComputeTensorInfo(paramsInfo.GetForgetGateBias());
const arm_compute::TensorInfo aclCellBiasInfo
    = BuildArmComputeTensorInfo(paramsInfo.GetCellBias());
const arm_compute::TensorInfo aclOutputGateBiasInfo
    = BuildArmComputeTensorInfo(paramsInfo.GetOutputGateBias());
arm_compute::TensorInfo aclInputToInputWeightsInfo;
arm_compute::TensorInfo aclRecurrentToInputWeightsInfo;
arm_compute::TensorInfo aclCellToInputWeightsInfo;
arm_compute::TensorInfo aclInputGateBiasInfo;
arm_compute::TensorInfo aclProjectionWeightsInfo;
arm_compute::TensorInfo aclProjectionBiasInfo;
arm_compute::TensorInfo aclCellToForgetWeightsInfo;
arm_compute::TensorInfo aclCellToOutputWeightsInfo;

arm_compute::TensorInfo aclInputLayerNormWeightsInfo;
arm_compute::TensorInfo aclForgetLayerNormWeightsInfo;
arm_compute::TensorInfo aclCellLayerNormWeightsInfo;
arm_compute::TensorInfo aclOutputLayerNormWeightsInfo;
aclInputGateBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputGateBias());

lstm_params_info.set_cifg_params(&aclInputToInputWeightsInfo,
                                 &aclRecurrentToInputWeightsInfo,
                                 paramsInfo.m_CellToInputWeights != nullptr ? &aclCellToInputWeightsInfo : nullptr,
                                 &aclInputGateBiasInfo);
aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionBias());

lstm_params_info.set_projection_params(&aclProjectionWeightsInfo,
                                       paramsInfo.m_ProjectionBias != nullptr ? &aclProjectionBiasInfo : nullptr);

lstm_params_info.set_peephole_params(&aclCellToForgetWeightsInfo, &aclCellToOutputWeightsInfo);
lstm_params_info.set_layer_normalization_params(descriptor.m_CifgEnabled ? nullptr : &aclInputLayerNormWeightsInfo,
                                                &aclForgetLayerNormWeightsInfo,
                                                &aclCellLayerNormWeightsInfo,
                                                &aclOutputLayerNormWeightsInfo);
arm_compute::ActivationLayerInfo activationLayerInfo =
    ConvertLstmActivationFuncToAclLayerInfo(descriptor.m_ActivationFunc);
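// Validate one NELSTMLayer per time step, selecting the input/output TensorInfos
// in the same way the workload constructor selects its ITensors.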
for (unsigned int i = 0; i != maxTime; ++i)
{
    arm_compute::ITensorInfo* outputLSTM;
    arm_compute::ITensorInfo* inputLSTM;
    if (maxTime == 1 && descriptor.m_TimeMajor)
    {
        TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
        TensorShape outputShapeShrink({outputShape[1], outputShape[2]});

        auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
        auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);

        const_cast<arm_compute::TensorInfo*>(&aclInputInfo)->set_tensor_shape(acl_input_shape_shrink);
        inputLSTM = const_cast<arm_compute::TensorInfo*>(&aclInputInfo);

        const_cast<arm_compute::TensorInfo*>(&aclOutputInfo)->set_tensor_shape(acl_output_shape_shrink);
        outputLSTM = const_cast<arm_compute::TensorInfo*>(&aclOutputInfo);
    }
    else if (maxTime == 1 && !descriptor.m_TimeMajor)
    {
        TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
        auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
        aclPermuteOutInfo.set_tensor_shape(acl_input_shape_shrink);
        inputLSTM = &aclPermuteOutInfo;

        outputLSTM = const_cast<arm_compute::ITensorInfo*>(concatInputsTensorInfosPtr[i]);
    }
    else
    {
        inputLSTM = splitterOutputsTensorInfosPtr[i];
        outputLSTM = const_cast<arm_compute::ITensorInfo*>(concatInputsTensorInfosPtr[i]);
    }
    statusLSTM = arm_compute::NELSTMLayer::validate(inputLSTM,
                                                    &aclInputToForgetWeightsInfo,
                                                    &aclInputToCellWeightsInfo,
                                                    &aclInputToOutputWeightsInfo,
                                                    &aclRecurrentToForgetWeightsInfo,
                                                    &aclRecurrentToCellWeightsInfo,
                                                    &aclRecurrentToOutputWeightsInfo,
                                                    &aclForgetGateBiasInfo,
                                                    &aclCellBiasInfo,
                                                    &aclOutputGateBiasInfo,
                                                    &aclOutputStateInInfo,
                                                    &aclCellStateInInfo,
                                                    &aclScratchBufferInfo,
                                                    &aclOutputStateOutInfo,
                                                    &aclCellStateOutInfo,
                                                    outputLSTM,
                                                    lstm_params_info,
                                                    activationLayerInfo,
                                                    cell_threshold,
                                                    projection_threshold);
    if (statusLSTM.error_code() != arm_compute::ErrorCode::OK)
TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});

concatOutputTensorInfo.SetShape(timeMajorShapeOutput);
arm_compute::TensorInfo aclConcatOutputTensorInfo = BuildArmComputeTensorInfo(concatOutputTensorInfo);
for (unsigned int i = 0; i < maxTime; ++i)
{
    auto acl_shape_expand = BuildArmComputeTensorShape(shapeExpandTimeMajor);
    concatInputsTensorInfos[i].set_tensor_shape(acl_shape_expand);
}

unsigned int aclAxisConcat = CalcAclAxis(numberDimensions, dimension);
if (!descriptor.m_TimeMajor)
{
    statusConcat = arm_compute::NEConcatenateLayer::validate(concatInputsTensorInfosPtr,
                                                             &aclConcatOutputTensorInfo, aclAxisConcat);
}
else
{
    statusConcat = arm_compute::NEConcatenateLayer::validate(concatInputsTensorInfosPtr, &aclOutputInfo, aclAxisConcat);
}
if (!descriptor.m_TimeMajor)
{
    const_cast<arm_compute::TensorInfo*>(&aclInputInfo)->set_tensor_shape(
        BuildArmComputeTensorShape(shapeExpandBatchMajor));
}
else
{
    const_cast<arm_compute::TensorInfo*>(&aclInputInfo)->set_tensor_shape(
        BuildArmComputeTensorShape(shapeExpandTimeMajor));
}
if (maxTime != 1)
{
    statusPermute2 = arm_compute::NEPermute::validate(&aclConcatOutputTensorInfo, &aclOutputInfo, arm_compute::PermutationVector(0U, 2U, 1U));
}
else
{
    statusPermute2 = arm_compute::NEPermute::validate(concatInputsTensorInfosPtr[0], &aclOutputInfo, arm_compute::PermutationVector(0U, 2U, 1U));
}
auto okCode = arm_compute::ErrorCode::OK;
if (statusPermute1.error_code() == okCode &&
    statusSplit.error_code() == okCode &&
    statusLSTM.error_code() == okCode &&
    statusConcat.error_code() == okCode &&
    statusPermute2.error_code() == okCode)
{
    return arm_compute::Status(arm_compute::ErrorCode::OK,
                               "All Unidirectional Sequence LSTM layer validate status OK.");
}
else
{
    return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
                               "Unidirectional Sequence LSTM layer validate status failed.");
}
void NeonUnidirectionalSequenceLstmFloatWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_InputToInputWeightsTensor);
    FreeTensorIfUnused(m_InputToForgetWeightsTensor);
    FreeTensorIfUnused(m_InputToCellWeightsTensor);
    FreeTensorIfUnused(m_InputToOutputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);
    FreeTensorIfUnused(m_CellToInputWeightsTensor);
    FreeTensorIfUnused(m_CellToForgetWeightsTensor);
    FreeTensorIfUnused(m_CellToOutputWeightsTensor);
    FreeTensorIfUnused(m_InputGateBiasTensor);
    FreeTensorIfUnused(m_ForgetGateBiasTensor);
    FreeTensorIfUnused(m_CellBiasTensor);
    FreeTensorIfUnused(m_OutputGateBiasTensor);
    FreeTensorIfUnused(m_ProjectionWeightsTensor);
    FreeTensorIfUnused(m_ProjectionBiasTensor);
    FreeTensorIfUnused(m_InputLayerNormWeightsTensor);
    FreeTensorIfUnused(m_ForgetLayerNormWeightsTensor);
    FreeTensorIfUnused(m_CellLayerNormWeightsTensor);
    FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
    FreeTensorIfUnused(m_ScratchBuffer);
}