ArmNN
 24.02
NeonQuantizedLstmWorkload Class Reference

#include <NeonQuantizedLstmWorkload.hpp>

Inheritance diagram for NeonQuantizedLstmWorkload:
Collaboration diagram for NeonQuantizedLstmWorkload:

Public Member Functions

 NeonQuantizedLstmWorkload (const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
 
virtual void Execute () const override
 
- Public Member Functions inherited from NeonBaseWorkload< QuantizedLstmQueueDescriptor >
 NeonBaseWorkload (const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
- Public Member Functions inherited from BaseWorkload< QuantizedLstmQueueDescriptor >
 BaseWorkload (const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
 
virtual const std::string & GetName () const override
 
void ExecuteAsync (ExecutionData &executionData) override
 
void PostAllocationConfigure () override
 
const QuantizedLstmQueueDescriptor & GetData () const
 
arm::pipe::ProfilingGuid GetGuid () const final
 
virtual bool SupportsTensorHandleReplacement () const override
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual arm::pipe::ProfilingGuid GetGuid () const =0
 
virtual bool SupportsTensorHandleReplacement () const =0
 
virtual const std::string & GetName () const =0
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()
 

Additional Inherited Members

- Protected Member Functions inherited from NeonBaseWorkload< QuantizedLstmQueueDescriptor >
virtual void Reconfigure ()
 
- Protected Attributes inherited from BaseWorkload< QuantizedLstmQueueDescriptor >
QuantizedLstmQueueDescriptor m_Data
 
const arm::pipe::ProfilingGuid m_Guid
 
const std::string m_Name
 

Detailed Description

Definition at line 18 of file NeonQuantizedLstmWorkload.hpp.

Constructor & Destructor Documentation

◆ NeonQuantizedLstmWorkload()

NeonQuantizedLstmWorkload ( const QuantizedLstmQueueDescriptor & descriptor,
const WorkloadInfo & info
)

Definition at line 17 of file NeonQuantizedLstmWorkload.cpp.

 : NeonBaseWorkload<QuantizedLstmQueueDescriptor>(descriptor, info)
{
    // Basic parameters
    m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

    m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

    m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

    m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());

    m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

    m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

    m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());

    const arm_compute::ITensor& input           = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& cell_state_in         = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    const arm_compute::ITensor& output_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();

    arm_compute::ITensor& cell_state_out   = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    arm_compute::ITensor& output_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();

    m_QuantizedLstmLayer.configure(&input,
                                   m_InputToInputWeightsTensor.get(),
                                   m_InputToForgetWeightsTensor.get(),
                                   m_InputToCellWeightsTensor.get(),
                                   m_InputToOutputWeightsTensor.get(),
                                   m_RecurrentToInputWeightsTensor.get(),
                                   m_RecurrentToForgetWeightsTensor.get(),
                                   m_RecurrentToCellWeightsTensor.get(),
                                   m_RecurrentToOutputWeightsTensor.get(),
                                   m_InputGateBiasTensor.get(),
                                   m_ForgetGateBiasTensor.get(),
                                   m_CellBiasTensor.get(),
                                   m_OutputGateBiasTensor.get(),
                                   &cell_state_in,
                                   &output_state_in,
                                   &cell_state_out,
                                   &output_state_out);

    InitializeArmComputeTensorData(*m_InputToInputWeightsTensor,
                                   m_Data.m_InputToInputWeights);

    InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor,
                                   m_Data.m_InputToForgetWeights);

    InitializeArmComputeTensorData(*m_InputToCellWeightsTensor,
                                   m_Data.m_InputToCellWeights);

    InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor,
                                   m_Data.m_InputToOutputWeights);

    InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor,
                                   m_Data.m_RecurrentToInputWeights);

    InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor,
                                   m_Data.m_RecurrentToForgetWeights);

    InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor,
                                   m_Data.m_RecurrentToCellWeights);

    InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor,
                                   m_Data.m_RecurrentToOutputWeights);

    InitializeArmComputeTensorData(*m_InputGateBiasTensor,
                                   m_Data.m_InputGateBias);

    InitializeArmComputeTensorData(*m_ForgetGateBiasTensor,
                                   m_Data.m_ForgetGateBias);

    InitializeArmComputeTensorData(*m_CellBiasTensor,
                                   m_Data.m_CellBias);

    InitializeArmComputeTensorData(*m_OutputGateBiasTensor,
                                   m_Data.m_OutputGateBias);

    // Force Compute Library to perform the necessary copying and reshaping, after which
    // delete all the input tensors that will no longer be needed
    m_QuantizedLstmLayer.prepare();
    FreeUnusedTensors();
}

References armnn::info.
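As a usage sketch (not part of the generated documentation): the workload is normally created by the Neon backend's workload factory, but the descriptor it consumes can be illustrated directly. All tensor handles below (inputToInputWeightsHandle, inputHandle, cellStateInHandle, and so on) are hypothetical placeholders that the backend would normally supply; their data types and shapes must match the quantized LSTM expectations (typically QAsymmU8 weights and activations, Signed32 biases, QSymmS16 cell state).

#include "NeonQuantizedLstmWorkload.hpp" // include path depends on the build tree

using namespace armnn;

QuantizedLstmQueueDescriptor descriptor;

// Weights and biases are ConstTensorHandle pointers (placeholders here)
descriptor.m_InputToInputWeights      = inputToInputWeightsHandle;
descriptor.m_InputToForgetWeights     = inputToForgetWeightsHandle;
descriptor.m_InputToCellWeights       = inputToCellWeightsHandle;
descriptor.m_InputToOutputWeights     = inputToOutputWeightsHandle;
descriptor.m_RecurrentToInputWeights  = recurrentToInputWeightsHandle;
descriptor.m_RecurrentToForgetWeights = recurrentToForgetWeightsHandle;
descriptor.m_RecurrentToCellWeights   = recurrentToCellWeightsHandle;
descriptor.m_RecurrentToOutputWeights = recurrentToOutputWeightsHandle;
descriptor.m_InputGateBias            = inputGateBiasHandle;
descriptor.m_ForgetGateBias           = forgetGateBiasHandle;
descriptor.m_CellBias                 = cellBiasHandle;
descriptor.m_OutputGateBias           = outputGateBiasHandle;

// Runtime tensors, in the order the constructor reads them:
// inputs:  [0] input, [1] cell state in, [2] output state in
// outputs: [0] cell state out, [1] output state out
descriptor.m_Inputs  = { inputHandle, cellStateInHandle, outputStateInHandle };
descriptor.m_Outputs = { cellStateOutHandle, outputStateOutHandle };

WorkloadInfo info; // TensorInfos matching the handles above

NeonQuantizedLstmWorkload workload(descriptor, info); // configures and prepares the Compute Library function

Construction performs the weight copying and the Compute Library prepare() step shown above, so the per-inference cost is limited to Execute().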

Member Function Documentation

◆ Execute()

void Execute ( ) const
override virtual

Implements IWorkload.

Definition at line 125 of file NeonQuantizedLstmWorkload.cpp.

{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonQuantizedLstmWorkload_Execute");
    m_QuantizedLstmLayer.run();
}

References ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID.
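Execute() simply runs the Arm Compute Library function that was configured and prepared in the constructor. Continuing the hypothetical sketch above, per-inference use reduces to a single call:

workload.Execute(); // input/state data must already be written to the descriptor's tensor handles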


The documentation for this class was generated from the following files:
NeonQuantizedLstmWorkload.hpp
NeonQuantizedLstmWorkload.cpp