14 #include <cl/test/ClWorkloadFactoryHelper.hpp>
// Convert an ArmNN axis index (counted from the outermost dimension) into
// the Arm Compute Library convention, which counts axes from the innermost
// dimension: aclAxis = numDimensions - 1 - armnnAxis.
//
// @param numDimensions total number of dimensions of the tensor.
// @param axis          ArmNN axis index (0 = outermost).
// @return the equivalent ACL axis index (0 = innermost).
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int axis)
{
    const unsigned int innermost = numDimensions - 1u;
    return innermost - axis;
}
29 using namespace armcomputetensorutils;
// ---------------------------------------------------------------------------
// ClUnidirectionalSequenceLstmFloatWorkload constructor (visible fragment).
// Builds the CL pipeline: [Permute to time-major] -> Split per time step ->
// one CLLSTMLayer per step -> Concat along time -> [Permute back to
// batch-major]. NOTE(review): many original lines (braces, declarations,
// some arguments) are missing from this extract; comments annotate only the
// statements that are visible.
// ---------------------------------------------------------------------------
34 const arm_compute::CLCompileContext& clCompileContext)
// Input 0 is the input sequence tensor; output 2 is the output sequence.
43 const arm_compute::ICLTensor& input =
static_cast<IClTensorHandle*
>(m_Data.m_Inputs[0])->GetTensor();
44 arm_compute::ICLTensor& output =
static_cast<IClTensorHandle*
>(m_Data.m_Outputs[2])->GetTensor();
// Time-major layout is [maxTime, batch, feature]; batch-major swaps dims 0/1.
56 unsigned int maxTime = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];
57 unsigned int batchSize = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];
58 unsigned int inputSize = inputLayerShape[2];
59 unsigned int outputSize = outputLayerShape[2];
60 unsigned int numUnits = cellStateLayerShape[1];
// Canonical time-major shapes used by the permute/split/concat stages.
62 const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
63 const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});
// Batch-major input: permute [batch, time, in] -> [time, batch, in] first,
// staging the result in m_PermuteFirstOut.
68 if (!m_Data.m_Parameters.m_TimeMajor)
70 std::unique_ptr<arm_compute::CLPermute> layer(
new arm_compute::CLPermute());
73 permuteOutInfo.
SetShape(timeMajorShapeInput);
74 BuildArmComputeTensor(m_PermuteFirstOut, permuteOutInfo);
75 armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_PermuteFirstOut);
// PermutationVector(0,2,1) swaps the two outer dimensions in ACL ordering.
78 layer->configure(clCompileContext, &input, &m_PermuteFirstOut, arm_compute::PermutationVector(0U,2U,1U));
79 m_Permute1.reset(layer.release());
// Allocate one splitter-output and one concat-input staging tensor per time
// step ([batch, inputSize] and [batch, outputSize] respectively).
85 for (
unsigned int i = 0; i < maxTime; ++i)
87 arm_compute::CLTensor splitter_out;
88 arm_compute::CLTensor concat_in;
90 auto splitterTensorInfo = inputInfo;
91 auto concatTensorInfo = outputInfo;
92 splitterTensorInfo.
SetShape({batchSize, inputSize});
93 concatTensorInfo.SetShape({batchSize, outputSize});
94 BuildArmComputeTensor(splitter_out, splitterTensorInfo);
95 BuildArmComputeTensor(concat_in, concatTensorInfo);
97 armcomputetensorutils::InitialiseArmComputeTensorEmpty(splitter_out);
98 armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_in);
101 m_SplitterOutputsTensors.push_back(std::move(splitter_out));
102 m_ConcatInputsTensors.push_back(std::move(concat_in));
// Pointer vectors are filled in a second pass, after all push_backs above,
// so vector reallocation cannot invalidate the addresses handed to ACL.
105 for (
unsigned int i = 0; i < maxTime; ++i)
108 m_SplitterOutputs.push_back(&m_SplitterOutputsTensors[i]);
109 m_ConcatInputs.push_back(&m_ConcatInputsTensors[i]);
// Describe the split: one view of extent 1 along dimension 0 (time) per step.
115 unsigned int numberDimensions = 3;
116 unsigned int dimension = 0;
121 unsigned int splitterDimSizes[3] = {1, batchSize, inputSize};
122 for (
unsigned int outputIdx = 0u; outputIdx < maxTime; ++outputIdx)
124 splitterDesc.
SetViewOriginCoord(outputIdx, dimension, splitterDimSizes[dimension] * outputIdx);
125 for (
unsigned int dimIdx = 0u; dimIdx < numberDimensions; ++dimIdx)
127 splitterDesc.
SetViewSize(outputIdx, dimIdx, splitterDimSizes[dimIdx]);
// Convert the ArmNN split axis to ACL's innermost-first axis numbering.
131 std::set<unsigned int> splitAxis =
ComputeSplitAxis(splitterDesc, timeMajorShapeInput);
133 std::unique_ptr<arm_compute::CLSplit> split_layer(
new arm_compute::CLSplit());
134 unsigned int aclAxisSplit = CalcAclAxis(splitterDesc.
GetNumDimensions(), *splitAxis.begin());
// The split reads the permuted tensor when the caller supplied batch-major
// data, otherwise it reads the workload input directly.
135 if (!m_Data.m_Parameters.m_TimeMajor)
137 split_layer->configure(&m_PermuteFirstOut, m_SplitterOutputs, aclAxisSplit);
141 split_layer->configure(&input, m_SplitterOutputs, aclAxisSplit);
144 split_layer->prepare();
145 m_Splitter.reset(split_layer.release());
// Stage the mandatory LSTM weight/bias tensors as CLTensors.
151 arm_compute::LSTMParams<arm_compute::ICLTensor> lstm_param;
153 m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
154 BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());
156 m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
157 BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());
159 m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
160 BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());
162 m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
163 BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());
165 m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
166 BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());
168 m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
169 BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());
171 m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
172 BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());
174 m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
175 BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());
177 m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
178 BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());
// Optional input-gate (non-CIFG) parameters.
181 if (!m_Data.m_Parameters.m_CifgEnabled)
183 m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
184 BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());
186 m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
187 BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());
// Cell-to-input weights may legitimately be absent, hence the null check
// before building (the staging CLTensor is created unconditionally).
189 m_CellToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
190 if (m_Data.m_CellToInputWeights !=
nullptr)
192 BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
195 m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
196 BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());
198 lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
199 m_RecurrentToInputWeightsTensor.get(),
200 m_Data.m_CellToInputWeights ? m_CellToInputWeightsTensor.get() :
nullptr,
201 m_InputGateBiasTensor.get());
// Optional projection parameters (the bias may be absent).
204 if (m_Data.m_Parameters.m_ProjectionEnabled)
206 m_ProjectionWeightsTensor = std::make_unique<arm_compute::CLTensor>();
207 BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());
209 m_ProjectionBiasTensor = std::make_unique<arm_compute::CLTensor>();
210 if (m_Data.m_ProjectionBias !=
nullptr)
212 BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
215 lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
216 m_Data.m_ProjectionBias ? m_ProjectionBiasTensor.get() :
nullptr);
// Optional peephole parameters.
219 if (m_Data.m_Parameters.m_PeepholeEnabled)
221 m_CellToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
222 BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());
224 m_CellToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
225 BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());
227 lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
// Optional layer-normalisation weights; the input-norm weights are only
// built when CIFG is off (with CIFG there is no input gate to normalise).
230 if (m_Data.m_Parameters.m_LayerNormEnabled)
232 m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
233 if (!m_Data.m_Parameters.m_CifgEnabled)
235 BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
238 m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
239 BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());
241 m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
242 BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());
244 m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
245 BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());
247 auto inputNormWeightTensor = m_Data.m_Parameters.m_CifgEnabled ? nullptr : m_InputLayerNormWeightsTensor.get();
248 lstm_param.set_layer_normalization_params(inputNormWeightTensor,
249 m_ForgetLayerNormWeightsTensor.get(),
250 m_CellLayerNormWeightsTensor.get(),
251 m_OutputLayerNormWeightsTensor.get());
// State tensors. NOTE(review): the *_out references also alias
// m_Inputs[1]/[2] - the hidden/cell state appears to be updated in place
// across time steps; confirm against the queue-descriptor layout.
254 arm_compute::ICLTensor& output_state_in =
static_cast<IClTensorHandle*
>(m_Data.m_Inputs[1])->GetTensor();
255 arm_compute::ICLTensor& cell_state_in =
static_cast<IClTensorHandle*
>(m_Data.m_Inputs[2])->GetTensor();
257 arm_compute::ICLTensor& output_state_out =
static_cast<IClTensorHandle*
>(m_Data.m_Inputs[1])->GetTensor();
258 arm_compute::ICLTensor& cell_state_out =
static_cast<IClTensorHandle*
>(m_Data.m_Inputs[2])->GetTensor();
// Scratch buffer: 3*numUnits with CIFG (one gate fewer), else 4*numUnits.
260 m_ScratchBuffer = std::make_unique<arm_compute::CLTensor>();
261 if (m_Data.m_Parameters.m_CifgEnabled)
264 BuildArmComputeTensor(*m_ScratchBuffer,
TensorInfo({batchSize, numUnits * 3}, armnnDataType));
269 BuildArmComputeTensor(*m_ScratchBuffer,
TensorInfo({batchSize, numUnits * 4}, armnnDataType));
273 float cell_threshold = m_Data.m_Parameters.m_ClippingThresCell;
274 float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;
277 arm_compute::ActivationLayerInfo activationLayerInfo =
// Configure one CLLSTMLayer per time step, wiring each step's input/output
// to the splitter/concat staging tensors (or directly to the workload
// input/output in the single-step special cases below).
280 for (
unsigned int i = 0; i != maxTime; ++i)
284 arm_compute::ICLTensor* outputLSTM;
285 arm_compute::ICLTensor* inputLSTM;
// Single time step, already time-major: shrink the shapes to 2D and run the
// LSTM directly on the workload input/output (no split/concat needed).
290 if (maxTime == 1 && m_Data.m_Parameters.m_TimeMajor)
294 TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
295 TensorShape outputShapeShrink({outputShape[1], outputShape[2]});
296 auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
297 auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);
298 (&input)->
info()->set_tensor_shape(acl_input_shape_shrink);
299 inputLSTM =
const_cast<arm_compute::ICLTensor*
>(&input);
300 (&output)->
info()->set_tensor_shape(acl_output_shape_shrink);
301 outputLSTM = &output;
// Single step but batch-major: read from the permuted tensor (reshaped to
// 2D) and write into the concat staging tensor.
307 else if (maxTime == 1 && !m_Data.m_Parameters.m_TimeMajor)
310 TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
311 auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
312 m_PermuteFirstOut.info()->set_tensor_shape(acl_input_shape_shrink);
313 inputLSTM = &m_PermuteFirstOut;
314 outputLSTM =
const_cast<arm_compute::ICLTensor*
>(m_ConcatInputs[i]);
// General case: step i reads splitter output i, writes concat input i.
319 inputLSTM = m_SplitterOutputs[i];
320 outputLSTM =
const_cast<arm_compute::ICLTensor*
>(m_ConcatInputs[i]);
// All steps share the same weights, state tensors and scratch buffer
// (several configure arguments are elided in this extract).
323 std::unique_ptr<arm_compute::CLLSTMLayer> lstm_layer(
new arm_compute::CLLSTMLayer());
324 lstm_layer->configure(clCompileContext,
326 m_InputToForgetWeightsTensor.get(),
327 m_InputToCellWeightsTensor.get(),
328 m_InputToOutputWeightsTensor.get(),
329 m_RecurrentToForgetWeightsTensor.get(),
330 m_RecurrentToCellWeightsTensor.get(),
331 m_RecurrentToOutputWeightsTensor.get(),
332 m_ForgetGateBiasTensor.get(),
333 m_CellBiasTensor.get(),
334 m_OutputGateBiasTensor.get(),
337 m_ScratchBuffer.get(),
344 projection_threshold);
346 m_Layers.emplace_back(std::move(lstm_layer));
// Allocate scratch memory once all per-step layers are configured.
349 armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
// Import the constant weights into the staged CLTensors (copy lines elided
// in this extract), honouring the same optional-parameter conditions.
361 if (!m_Data.m_Parameters.m_CifgEnabled)
365 if (m_Data.m_CellToInputWeights !=
nullptr)
372 if (m_Data.m_Parameters.m_ProjectionEnabled)
375 if (m_Data.m_ProjectionBias !=
nullptr)
381 if (m_Data.m_Parameters.m_PeepholeEnabled)
387 if (m_Data.m_Parameters.m_LayerNormEnabled)
389 if (!m_Data.m_Parameters.m_CifgEnabled)
// Prepare each LSTM layer now so weights are uploaded before Execute().
400 for (uint32_t i = 0; i < m_Layers.size(); ++i)
402 m_Layers[i]->prepare();
// Re-expand the per-step 2D outputs to 3D so they can be concatenated along
// the time axis ([1, batch, out] time-major / [batch, 1, out] batch-major).
411 TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
412 TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});
416 for (
unsigned int i = 0; i < maxTime; ++i)
418 m_ConcatInputs[i]->info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
422 for (
unsigned int inputIdx = 0u; inputIdx < maxTime; ++inputIdx)
// Concatenate the per-step outputs; for batch-major output the concat
// result is staged in concat_out and permuted back afterwards.
428 m_Concat.reset(
new arm_compute::CLConcatenateLayer());
429 unsigned int aclAxisConcat = CalcAclAxis(concatDescriptor.
GetNumDimensions(),
431 if (!m_Data.m_Parameters.m_TimeMajor)
433 TensorInfo concatOuputTensorInfo = outputInfo;
434 concatOuputTensorInfo.
SetShape(timeMajorShapeOutput);
435 BuildArmComputeTensor(concat_out, concatOuputTensorInfo);
436 armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_out);
438 m_Concat->configure(m_ConcatInputs, &concat_out, aclAxisConcat);
442 m_Concat->configure(m_ConcatInputs, &output, aclAxisConcat);
// maxTime == 1: no concat was built; just restore the output tensor's 3D
// shape that was shrunk in the single-step branch above.
451 if (!m_Data.m_Parameters.m_TimeMajor)
453 (&output)->
info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandBatchMajor));
457 (&output)->
info()->set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));
// Final permute back to batch-major ([time, batch, out] -> [batch, time, out]).
464 if (!m_Data.m_Parameters.m_TimeMajor)
467 std::unique_ptr<arm_compute::CLPermute> layer(
new arm_compute::CLPermute());
470 layer->configure(clCompileContext, &concat_out, &output, arm_compute::PermutationVector(0U, 2U, 1U));
474 layer->configure(clCompileContext, m_ConcatInputs[0], &output, arm_compute::PermutationVector(0U, 2U, 1U));
476 m_Permute2.reset(layer.release());
// NOTE(review): the next two lines appear to belong to Execute() (run each
// configured layer) and to an earlier status-reporting call - most of the
// surrounding lines are missing from this extract.
493 for (uint32_t i = 0; i < m_Layers.size(); ++i)
523 "Unidirectional Sequence LSTM layer validate status failed.");
// ---------------------------------------------------------------------------
// ClUnidirectionalSequenceLstmFloatWorkloadValidate (visible fragment).
// Mirrors the constructor's pipeline, validating each stage against ACL:
// Permute -> Split -> per-step CLLSTMLayer -> Concat -> Permute.
// ---------------------------------------------------------------------------
526 unsigned int maxTime = descriptor.
m_TimeMajor?inputLayerShape[0]:inputLayerShape[1];
527 unsigned int batchSize = descriptor.
m_TimeMajor?inputLayerShape[1]:inputLayerShape[0];
528 unsigned int inputSize = inputLayerShape[2];
529 unsigned int outputSize = outputLayerShape[2];
531 const TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});
532 const TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});
545 const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
546 const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
// Validate the optional leading permute into time-major layout.
552 arm_compute::TensorInfo aclPermuteOutInfo = armcomputetensorutils::BuildArmComputeTensorInfo(permuteOutInfo);
555 statusPermute1 = arm_compute::CLPermute::validate(&aclInputInfo,
557 arm_compute::PermutationVector(0U, 2U, 1U));
// Build per-time-step TensorInfos for the split outputs / concat inputs.
563 std::vector<arm_compute::TensorInfo> splitterOutputsTensorInfos;
564 std::vector<arm_compute::TensorInfo> concatInputsTensorInfos;
565 std::vector<arm_compute::ITensorInfo*> splitterOutputsTensorInfosPtr;
566 std::vector<const arm_compute::ITensorInfo*> concatInputsTensorInfosPtr;
// reserve() guarantees the emplace_backs below never reallocate, keeping
// the addresses stored in the *Ptr vectors valid.
567 splitterOutputsTensorInfos.reserve(maxTime);
568 concatInputsTensorInfos.reserve(maxTime);
569 for (
unsigned int i = 0; i < maxTime; ++i)
571 arm_compute::TensorInfo splitter_out;
572 arm_compute::TensorInfo concat_in;
576 splitterTensorInfo.SetShape({batchSize, inputSize});
577 concatTensorInfo.SetShape({batchSize, outputSize});
579 arm_compute::TensorInfo aclSplitterTensorInfo
580 = armcomputetensorutils::BuildArmComputeTensorInfo(splitterTensorInfo);
581 arm_compute::TensorInfo aclConcatTensorInfo
582 = armcomputetensorutils::BuildArmComputeTensorInfo(concatTensorInfo);
584 splitterOutputsTensorInfos.emplace_back(aclSplitterTensorInfo);
585 concatInputsTensorInfos.emplace_back(aclConcatTensorInfo);
586 splitterOutputsTensorInfosPtr.emplace_back(&splitterOutputsTensorInfos[i]);
587 concatInputsTensorInfosPtr.emplace_back(&concatInputsTensorInfos[i]);
// Validate the split along the time axis (ArmNN dim 0 -> ACL axis 2).
593 unsigned int numberDimensions = 3;
594 unsigned int dimension = 0;
595 unsigned int aclAxisSplit = CalcAclAxis(numberDimensions, dimension);
601 statusSplit = arm_compute::CLSplit::validate(&aclPermuteOutInfo,
602 splitterOutputsTensorInfosPtr,
607 statusSplit = arm_compute::CLSplit::validate(&aclInputInfo, splitterOutputsTensorInfosPtr, aclAxisSplit);
// Assemble the LSTM parameter infos (mandatory + optional blocks).
615 arm_compute::LSTMParams<arm_compute::ITensorInfo> lstm_params_info;
617 unsigned int numUnits = cellStateIn.
GetShape()[1];
// Scratch buffer is 4*numUnits, or 3*numUnits when CIFG removes a gate.
618 unsigned int scratchBufferFactor = 4;
623 scratchBufferFactor = 3;
629 const arm_compute::TensorInfo aclOutputStateInInfo = BuildArmComputeTensorInfo(outputStateIn);
630 const arm_compute::TensorInfo aclCellStateInInfo = BuildArmComputeTensorInfo(cellStateIn);
631 const arm_compute::TensorInfo aclScratchBufferInfo = BuildArmComputeTensorInfo(scratchBuffer);
632 const arm_compute::TensorInfo aclOutputStateOutInfo = BuildArmComputeTensorInfo(outputStateOut);
633 const arm_compute::TensorInfo aclCellStateOutInfo = BuildArmComputeTensorInfo(cellStateOut);
// Mandatory weight/bias infos (initialisers partly elided in this extract).
636 const arm_compute::TensorInfo aclInputToForgetWeightsInfo
638 const arm_compute::TensorInfo aclInputToCellWeightsInfo
640 const arm_compute::TensorInfo aclInputToOutputWeightsInfo
642 const arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo
644 const arm_compute::TensorInfo aclRecurrentToCellWeightsInfo
646 const arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo
648 const arm_compute::TensorInfo aclForgetGateBiasInfo
650 const arm_compute::TensorInfo aclCellBiasInfo
651 = BuildArmComputeTensorInfo(paramsInfo.
GetCellBias());
652 const arm_compute::TensorInfo aclOutputGateBiasInfo
// Optional-parameter infos are default-constructed and only populated when
// the corresponding descriptor flag is set (population partly elided here).
655 arm_compute::TensorInfo aclInputToInputWeightsInfo;
656 arm_compute::TensorInfo aclRecurrentToInputWeightsInfo;
657 arm_compute::TensorInfo aclCellToInputWeightsInfo;
658 arm_compute::TensorInfo aclInputGateBiasInfo;
659 arm_compute::TensorInfo aclProjectionWeightsInfo;
660 arm_compute::TensorInfo aclProjectionBiasInfo;
661 arm_compute::TensorInfo aclCellToForgetWeightsInfo;
662 arm_compute::TensorInfo aclCellToOutputWeightsInfo;
664 arm_compute::TensorInfo aclInputLayerNormWeightsInfo;
665 arm_compute::TensorInfo aclForgetLayerNormWeightsInfo;
666 arm_compute::TensorInfo aclCellLayerNormWeightsInfo;
667 arm_compute::TensorInfo aclOutputLayerNormWeightsInfo;
678 aclInputGateBiasInfo = BuildArmComputeTensorInfo(paramsInfo.
GetInputGateBias());
680 lstm_params_info.set_cifg_params(&aclInputToInputWeightsInfo,
681 &aclRecurrentToInputWeightsInfo,
683 &aclInputGateBiasInfo);
690 aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.
GetProjectionBias());
694 lstm_params_info.set_projection_params(&aclProjectionWeightsInfo,
703 lstm_params_info.set_peephole_params(&aclCellToForgetWeightsInfo, &aclCellToOutputWeightsInfo);
716 lstm_params_info.set_layer_normalization_params(descriptor.
m_CifgEnabled ?
nullptr :
717 &aclInputLayerNormWeightsInfo,
718 &aclForgetLayerNormWeightsInfo,
719 &aclCellLayerNormWeightsInfo,
720 &aclOutputLayerNormWeightsInfo);
// Validate one CLLSTMLayer per time step, mirroring the constructor's
// input/output wiring (incl. the maxTime == 1 shape-shrink special cases).
727 arm_compute::ActivationLayerInfo activationLayerInfo =
730 for (
unsigned int i = 0; i != maxTime; ++i)
735 arm_compute::ITensorInfo* outputLSTM;
736 arm_compute::ITensorInfo* inputLSTM;
// NOTE(review): const_cast + set_tensor_shape mutates the nominally-const
// aclInput/aclOutput infos in place; the input shape is restored below.
745 TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
746 TensorShape outputShapeShrink({outputShape[1], outputShape[2]});
747 auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
748 auto acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);
749 const_cast<arm_compute::TensorInfo*
>(&aclInputInfo)->set_tensor_shape(acl_input_shape_shrink);
750 inputLSTM =
const_cast<arm_compute::TensorInfo*
>(&aclInputInfo);
751 const_cast<arm_compute::TensorInfo*
>(&aclOutputInfo)->set_tensor_shape(acl_output_shape_shrink);
752 outputLSTM =
const_cast<arm_compute::TensorInfo*
>(&aclOutputInfo);
761 TensorShape inputShapeShrink({inputShape[1], inputShape[2]});
762 auto acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);
763 aclPermuteOutInfo.set_tensor_shape(acl_input_shape_shrink);
764 inputLSTM = &aclPermuteOutInfo;
765 outputLSTM =
const_cast<arm_compute::ITensorInfo*
>(concatInputsTensorInfosPtr[i]);
770 inputLSTM = splitterOutputsTensorInfosPtr[i];
771 outputLSTM =
const_cast<arm_compute::ITensorInfo*
>(concatInputsTensorInfosPtr[i]);
774 statusLSTM = arm_compute::CLLSTMLayer::validate(inputLSTM,
775 &aclInputToForgetWeightsInfo,
776 &aclInputToCellWeightsInfo,
777 &aclInputToOutputWeightsInfo,
778 &aclRecurrentToForgetWeightsInfo,
779 &aclRecurrentToCellWeightsInfo,
780 &aclRecurrentToOutputWeightsInfo,
781 &aclForgetGateBiasInfo,
783 &aclOutputGateBiasInfo,
784 &aclOutputStateInInfo,
786 &aclScratchBufferInfo,
787 &aclOutputStateOutInfo,
788 &aclCellStateOutInfo,
793 projection_threshold);
// Bail out of the loop early if any per-step LSTM validation failed.
795 if (statusLSTM.error_code() != arm_compute::ErrorCode::OK)
// Validate the trailing concat (+ permute back for batch-major output).
807 TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});
808 TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});
811 concatOuputTensorInfo.SetShape(timeMajorShapeOutput);
812 arm_compute::TensorInfo aclConcatOuputTensorInfo= BuildArmComputeTensorInfo(concatOuputTensorInfo);
// Re-expand the per-step infos to 3D before validating the concatenation.
816 for (
unsigned int i = 0; i < maxTime; ++i)
818 auto acl_shape_expand = BuildArmComputeTensorShape(shapeExpandTimeMajor);
819 concatInputsTensorInfos[i].set_tensor_shape(acl_shape_expand);
822 unsigned int aclAxisConcat = CalcAclAxis(numberDimensions, dimension);
825 statusConcat = arm_compute::CLConcatenateLayer::validate(concatInputsTensorInfosPtr,
826 &aclConcatOuputTensorInfo,
831 statusConcat = arm_compute::CLConcatenateLayer::validate(concatInputsTensorInfosPtr,
// Restore the 3D shape on the (const) input info mutated above.
842 const_cast<arm_compute::TensorInfo*
>(&aclInputInfo)->set_tensor_shape(
843 BuildArmComputeTensorShape(shapeExpandBatchMajor));
847 const_cast<arm_compute::TensorInfo*
>(&aclInputInfo)->set_tensor_shape(
848 BuildArmComputeTensorShape(shapeExpandTimeMajor));
859 statusPermute2 = arm_compute::CLPermute::validate(&aclConcatOuputTensorInfo,
861 arm_compute::PermutationVector(0U, 2U, 1U));
865 statusPermute2 = arm_compute::CLPermute::validate(concatInputsTensorInfosPtr[0],
867 arm_compute::PermutationVector(0U, 2U, 1U));
// Overall status is OK only when every stage validated OK.
871 auto okCode = arm_compute::ErrorCode::OK;
872 if (statusPermute1.error_code() == okCode &&
873 statusSplit.error_code() == okCode &&
874 statusLSTM .error_code() == okCode &&
875 statusConcat.error_code() == okCode &&
876 statusPermute2.error_code() == okCode)
879 "All Unidirectional Sequence LSTM layer validate status OK.");
884 "Unidirectional Sequence LSTM layer validate status failed.");
// Releases the staged weight/bias/scratch CLTensors once configure/prepare
// has consumed them. FreeTensorIfUnused is a project helper - presumably it
// only frees a tensor whose memory is no longer referenced by the configured
// ACL functions; confirm in the CL workload utilities. (The function's
// closing brace lies beyond this extract.)
888 void ClUnidirectionalSequenceLstmFloatWorkload::FreeUnusedTensors()
890 FreeTensorIfUnused(m_InputToInputWeightsTensor);
891 FreeTensorIfUnused(m_InputToForgetWeightsTensor);
892 FreeTensorIfUnused(m_InputToCellWeightsTensor);
893 FreeTensorIfUnused(m_InputToOutputWeightsTensor);
894 FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);
895 FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);
896 FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);
897 FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);
898 FreeTensorIfUnused(m_CellToInputWeightsTensor);
899 FreeTensorIfUnused(m_CellToForgetWeightsTensor);
900 FreeTensorIfUnused(m_CellToOutputWeightsTensor);
901 FreeTensorIfUnused(m_InputGateBiasTensor);
902 FreeTensorIfUnused(m_ForgetGateBiasTensor);
903 FreeTensorIfUnused(m_CellBiasTensor);
904 FreeTensorIfUnused(m_OutputGateBiasTensor);
905 FreeTensorIfUnused(m_ProjectionWeightsTensor);
906 FreeTensorIfUnused(m_ProjectionBiasTensor);
907 FreeTensorIfUnused(m_InputLayerNormWeightsTensor);
908 FreeTensorIfUnused(m_ForgetLayerNormWeightsTensor);
909 FreeTensorIfUnused(m_CellLayerNormWeightsTensor);
910 FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
911 FreeTensorIfUnused(m_ScratchBuffer);