ArmNN
 25.11
ClQuantizedLstmWorkload Class Reference

#include <ClQuantizedLstmWorkload.hpp>

Inheritance diagram for ClQuantizedLstmWorkload:
Collaboration diagram for ClQuantizedLstmWorkload:

Public Member Functions

 ClQuantizedLstmWorkload (const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
void Execute () const override
Public Member Functions inherited from ClBaseWorkload< QuantizedLstmQueueDescriptor >
 ClBaseWorkload (const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
Public Member Functions inherited from BaseWorkload< QuantizedLstmQueueDescriptor >
 BaseWorkload (const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
virtual const std::string & GetName () const override
void PostAllocationConfigure () override
const QuantizedLstmQueueDescriptor & GetData () const
arm::pipe::ProfilingGuid GetGuid () const final
virtual bool SupportsTensorHandleReplacement () const override
Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()

Additional Inherited Members

Protected Member Functions inherited from ClBaseWorkload< QuantizedLstmQueueDescriptor >
virtual void Reconfigure ()
Protected Attributes inherited from BaseWorkload< QuantizedLstmQueueDescriptor >
QuantizedLstmQueueDescriptor m_Data
const arm::pipe::ProfilingGuid m_Guid
const std::string m_Name

Detailed Description

Definition at line 24 of file ClQuantizedLstmWorkload.hpp.

Constructor & Destructor Documentation

◆ ClQuantizedLstmWorkload()

ClQuantizedLstmWorkload ( const QuantizedLstmQueueDescriptor & descriptor,
const WorkloadInfo & info,
const arm_compute::CLCompileContext & clCompileContext )

Definition at line 64 of file ClQuantizedLstmWorkload.cpp.

    : ClBaseWorkload<QuantizedLstmQueueDescriptor>(descriptor, info)
{
    m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

    m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

    m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

    m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());

    m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

    m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

    m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());

    const arm_compute::ICLTensor& inputTensor         = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& cellStateInTensor         = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    const arm_compute::ICLTensor& outputStateInTensor = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();

    arm_compute::ICLTensor& cellStateOutTensor   = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    arm_compute::ICLTensor& outputStateOutTensor = static_cast<IClTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizedLstmWorkload_configure");
        m_QuantizedLstmLayer.configure(clCompileContext, &inputTensor, m_InputToInputWeightsTensor.get(),
                                       m_InputToForgetWeightsTensor.get(),
                                       m_InputToCellWeightsTensor.get(), m_InputToOutputWeightsTensor.get(),
                                       m_RecurrentToInputWeightsTensor.get(), m_RecurrentToForgetWeightsTensor.get(),
                                       m_RecurrentToCellWeightsTensor.get(), m_RecurrentToOutputWeightsTensor.get(),
                                       m_InputGateBiasTensor.get(), m_ForgetGateBiasTensor.get(),
                                       m_CellBiasTensor.get(),
                                       m_OutputGateBiasTensor.get(), &cellStateInTensor, &outputStateInTensor,
                                       &cellStateOutTensor, &outputStateOutTensor);
    }

    InitializeArmComputeClTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
    InitializeArmComputeClTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
    InitializeArmComputeClTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
    InitializeArmComputeClTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
    InitializeArmComputeClTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
    InitializeArmComputeClTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
    InitializeArmComputeClTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
    InitializeArmComputeClTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
    InitializeArmComputeClTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
    InitializeArmComputeClTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
    InitializeArmComputeClTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
    InitializeArmComputeClTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);

    m_QuantizedLstmLayer.prepare();
    FreeUnusedTensors();
}

References ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID, ClBaseWorkload< QuantizedLstmQueueDescriptor >::ClBaseWorkload(), armnn::info, armnn::InitializeArmComputeClTensorData(), and BaseWorkload< QuantizedLstmQueueDescriptor >::m_Data.
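
A minimal usage sketch, assuming the tensor infos, ConstTensors and CL tensor handles have already been produced by the CL backend (in practice ClWorkloadFactory does this). The handle and tensor variables below are hypothetical placeholders, not part of this class:

    using namespace armnn;

    QuantizedLstmQueueDescriptor descriptor;

    // Constant weights and biases are supplied as ConstTensorHandles; ScopedTensorHandle
    // wraps an armnn::ConstTensor. Only one member is shown; the remaining eleven
    // weight/bias members (m_InputToForgetWeights, ..., m_OutputGateBias) are set the same way.
    ScopedTensorHandle inputToInputWeights(inputToInputWeightsTensor);   // hypothetical ConstTensor
    descriptor.m_InputToInputWeights = &inputToInputWeights;

    // Runtime tensors: inputs are {input, cellStateIn, outputStateIn},
    // outputs are {cellStateOut, outputStateOut}; all must be CL tensor handles.
    descriptor.m_Inputs  = { inputHandle, cellStateInHandle, outputStateInHandle };    // hypothetical handles
    descriptor.m_Outputs = { cellStateOutHandle, outputStateOutHandle };               // hypothetical handles

    WorkloadInfo info;
    info.m_InputTensorInfos  = { inputInfo, cellStateInInfo, outputStateInInfo };      // hypothetical TensorInfos
    info.m_OutputTensorInfos = { cellStateOutInfo, outputStateOutInfo };               // hypothetical TensorInfos

    arm_compute::CLCompileContext clCompileContext;   // normally owned by the CL backend
    ClQuantizedLstmWorkload workload(descriptor, info, clCompileContext);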

Member Function Documentation

◆ Execute()

void Execute () const override

Implements IWorkload.

Definition at line 142 of file ClQuantizedLstmWorkload.cpp.

{
    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizedLstmWorkload_Execute");
    RunClFunction(m_QuantizedLstmLayer, CHECK_LOCATION());
}

References ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID, CHECK_LOCATION, and armnn::RunClFunction().
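
Once the workload has been constructed and all CL tensor handles are allocated, execution is a single call; in the normal flow armnn::LoadedNetwork drives this during EnqueueWorkload rather than user code. A minimal sketch, continuing the hypothetical workload object from the constructor example above:

    workload.PostAllocationConfigure();   // inherited from BaseWorkload
    workload.Execute();                   // runs m_QuantizedLstmLayer via RunClFunction on the CL command queue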


The documentation for this class was generated from the following files:
ClQuantizedLstmWorkload.hpp
ClQuantizedLstmWorkload.cpp