using namespace armcomputetensorutils;
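
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo& input,
                                                    const TensorInfo& previousCellStateIn,
                                                    const TensorInfo& previousOutputIn,
                                                    const TensorInfo& cellStateOut,
                                                    const TensorInfo& output,
                                                    const QuantizedLstmInputParamsInfo& paramsInfo)
{
    // Describe the ArmNN inputs and outputs as Compute Library TensorInfo objects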
    const arm_compute::TensorInfo aclInputInfo               = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclPreviousCellStateInInfo = BuildArmComputeTensorInfo(previousCellStateIn);
    const arm_compute::TensorInfo aclPreviousOutputInInfo    = BuildArmComputeTensorInfo(previousOutputIn);
    const arm_compute::TensorInfo aclCellStateOutInfo        = BuildArmComputeTensorInfo(cellStateOut);
    const arm_compute::TensorInfo aclOutputInfo              = BuildArmComputeTensorInfo(output);
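
    // Basic parameters: the per-gate weights and biases supplied through QuantizedLstmInputParamsInfo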
    const arm_compute::TensorInfo aclInputToInputWeightsInfo
                                  = BuildArmComputeTensorInfo(paramsInfo.GetInputToInputWeights());
    const arm_compute::TensorInfo aclInputToForgetWeightsInfo
                                  = BuildArmComputeTensorInfo(paramsInfo.GetInputToForgetWeights());
    const arm_compute::TensorInfo aclInputToCellWeightsInfo
                                  = BuildArmComputeTensorInfo(paramsInfo.GetInputToCellWeights());
    const arm_compute::TensorInfo aclInputToOutputWeightsInfo
                                  = BuildArmComputeTensorInfo(paramsInfo.GetInputToOutputWeights());
    const arm_compute::TensorInfo aclRecurrentToInputWeightsInfo
                                  = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToInputWeights());
    const arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo
                                  = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToForgetWeights());
    const arm_compute::TensorInfo aclRecurrentToCellWeightsInfo
                                  = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToCellWeights());
    const arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo
                                  = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToOutputWeights());
    const arm_compute::TensorInfo aclInputGateBiasInfo  = BuildArmComputeTensorInfo(paramsInfo.GetInputGateBias());
    const arm_compute::TensorInfo aclForgetGateBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetForgetGateBias());
    const arm_compute::TensorInfo aclCellBiasInfo       = BuildArmComputeTensorInfo(paramsInfo.GetCellBias());
    const arm_compute::TensorInfo aclOutputGateBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetOutputGateBias());

    return arm_compute::CLLSTMLayerQuantized::validate(&aclInputInfo, &aclInputToInputWeightsInfo,
                                                       &aclInputToForgetWeightsInfo, &aclInputToCellWeightsInfo,
                                                       &aclInputToOutputWeightsInfo, &aclRecurrentToInputWeightsInfo,
                                                       &aclRecurrentToForgetWeightsInfo, &aclRecurrentToCellWeightsInfo,
                                                       &aclRecurrentToOutputWeightsInfo, &aclInputGateBiasInfo,
                                                       &aclForgetGateBiasInfo, &aclCellBiasInfo, &aclOutputGateBiasInfo,
                                                       &aclPreviousCellStateInInfo, &aclPreviousOutputInInfo,
                                                       &aclCellStateOutInfo, &aclOutputInfo);
}

ClQuantizedLstmWorkload::ClQuantizedLstmWorkload(const QuantizedLstmQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info,
                                                 const arm_compute::CLCompileContext& clCompileContext)
{
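    // Allocate a CLTensor for each constant weight and bias; the actual data is copied in later,
    // after the Compute Library layer has been configured.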
    m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

    m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

    m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

    m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());

    m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

    m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

    m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());
    m_QuantizedLstmLayer.configure(clCompileContext, &inputTensor, m_InputToInputWeightsTensor.get(),
                                   m_InputToForgetWeightsTensor.get(), m_InputToCellWeightsTensor.get(),
                                   m_InputToOutputWeightsTensor.get(), m_RecurrentToInputWeightsTensor.get(),
                                   m_RecurrentToForgetWeightsTensor.get(), m_RecurrentToCellWeightsTensor.get(),
                                   m_RecurrentToOutputWeightsTensor.get(), m_InputGateBiasTensor.get(),
                                   m_ForgetGateBiasTensor.get(), m_CellBiasTensor.get(), m_OutputGateBiasTensor.get(),
                                   &cellStateInTensor, &outputStateInTensor,
                                   &cellStateOutTensor, &outputStateOutTensor);
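
    // In the elided lines each of the constant tensors above is filled with its weight/bias data
    // via InitializeArmComputeClTensorData before the layer is prepared.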
    m_QuantizedLstmLayer.prepare();

    // Release the staging tensors that are no longer needed once the layer has been prepared
    FreeUnusedTensors();
}

void ClQuantizedLstmWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_InputToInputWeightsTensor);
    FreeTensorIfUnused(m_InputToForgetWeightsTensor);
    FreeTensorIfUnused(m_InputToCellWeightsTensor);
    FreeTensorIfUnused(m_InputToOutputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);
    FreeTensorIfUnused(m_InputGateBiasTensor);
    FreeTensorIfUnused(m_ForgetGateBiasTensor);
    FreeTensorIfUnused(m_CellBiasTensor);
    FreeTensorIfUnused(m_OutputGateBiasTensor);
}
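
// The Execute() override is not part of this excerpt. A minimal sketch of it, based only on the
// helpers documented for this workload: ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label) opens a
// scoped profiling event using the calling class's GetGuid()/GetName(), and
// RunClFunction(function, location) runs a configured arm_compute::IFunction. The event label
// string below is illustrative.

void ClQuantizedLstmWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizedLstmWorkload_Execute");
    RunClFunction(m_QuantizedLstmLayer, CHECK_LOCATION());
}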