ArmNN 25.11
ClBatchNormalizationFloatWorkload Class Reference

#include <ClBatchNormalizationFloatWorkload.hpp>

Inheritance diagram for ClBatchNormalizationFloatWorkload
Collaboration diagram for ClBatchNormalizationFloatWorkload

Public Member Functions

 ClBatchNormalizationFloatWorkload (const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
void Execute () const override
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
Public Member Functions inherited from TypedWorkload< QueueDescriptor, armnn::DataType::Float16, armnn::DataType::Float32 >
 TypedWorkload (const QueueDescriptor &descriptor, const WorkloadInfo &info)
Public Member Functions inherited from BaseWorkload< QueueDescriptor >
 BaseWorkload (const QueueDescriptor &descriptor, const WorkloadInfo &info)
virtual const std::string & GetName () const override
void PostAllocationConfigure () override
const QueueDescriptor & GetData () const
arm::pipe::ProfilingGuid GetGuid () const final
virtual bool SupportsTensorHandleReplacement () const override
Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()

Additional Inherited Members

Protected Attributes inherited from BaseWorkload< QueueDescriptor >
QueueDescriptor m_Data
const arm::pipe::ProfilingGuid m_Guid
const std::string m_Name

Detailed Description

Definition at line 25 of file ClBatchNormalizationFloatWorkload.hpp.

Constructor & Destructor Documentation

◆ ClBatchNormalizationFloatWorkload()

ClBatchNormalizationFloatWorkload ( const BatchNormalizationQueueDescriptor & descriptor,
const WorkloadInfo & info,
const arm_compute::CLCompileContext & clCompileContext )

Definition at line 54 of file ClBatchNormalizationFloatWorkload.cpp.

ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
    const BatchNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info,
    const arm_compute::CLCompileContext& clCompileContext)
    : FloatWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
{
    // Report profiling details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClBatchNormalizationWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    m_Mean = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo());

    m_Variance = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo());

    m_Gamma = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo());

    m_Beta = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo());

    m_Data.ValidateInputsOutputs("ClBatchNormalizationFloatWorkload", 1, 1);

    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_configure");
        m_Layer.configure(clCompileContext,
                          &input,
                          &output,
                          m_Mean.get(),
                          m_Variance.get(),
                          m_Beta.get(),
                          m_Gamma.get(),
                          m_Data.m_Parameters.m_Eps,
                          activationInfo);
    }

    InitializeArmComputeClTensorData(*m_Mean,     m_Data.m_Mean);
    InitializeArmComputeClTensorData(*m_Variance, m_Data.m_Variance);
    InitializeArmComputeClTensorData(*m_Beta,     m_Data.m_Beta);
    InitializeArmComputeClTensorData(*m_Gamma,    m_Data.m_Gamma);

    // Force Compute Library to perform the necessary copying and reshaping, after which
    // the staging input tensors that are no longer needed are freed.
    m_Layer.prepare();
    FreeUnusedTensors();
}

References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID, armnn::ConvertAdditionalInfoToAclActivationLayerInfo(), armnn::info, armnn::InitializeArmComputeClTensorData(), BaseWorkload< QueueDescriptor >::m_Data, and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.
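For orientation, the sketch below shows roughly how a caller could drive this constructor. It is an illustrative assumption rather than ArmNN source: in practice the descriptor, WorkloadInfo and compile context are assembled by the Cl workload factory, and the handle variables used here (meanHandle, inputHandle, and so on) are hypothetical placeholders.

// Illustrative sketch only. The ConstTensorHandle*/ITensorHandle* variables are
// hypothetical placeholders for handles normally provided by the Cl backend.
armnn::BatchNormalizationQueueDescriptor descriptor;
descriptor.m_Mean             = meanHandle;       // hypothetical ConstTensorHandle*
descriptor.m_Variance         = varianceHandle;
descriptor.m_Beta             = betaHandle;
descriptor.m_Gamma            = gammaHandle;
descriptor.m_Parameters.m_Eps = 0.001f;
descriptor.m_Inputs.push_back(inputHandle);       // IClTensorHandle-backed input
descriptor.m_Outputs.push_back(outputHandle);     // IClTensorHandle-backed output

armnn::WorkloadInfo info;                          // holds the input/output TensorInfos
arm_compute::CLCompileContext clCompileContext;

auto workload = std::make_unique<armnn::ClBatchNormalizationFloatWorkload>(
    descriptor, info, clCompileContext);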

Member Function Documentation

◆ Execute()

void Execute ( ) const
override virtual

Implements IWorkload.

Definition at line 113 of file ClBatchNormalizationFloatWorkload.cpp.

{
    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_Execute");
    RunClFunction(m_Layer, CHECK_LOCATION());
}

References ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID, CHECK_LOCATION, and armnn::RunClFunction().
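A minimal sketch of how the runtime exercises this method through the IWorkload interface; the helper function below is hypothetical, but the two calls are the ones listed in this class's public interface.

// Hypothetical helper illustrating how a workload is driven via IWorkload.
void RunOnce(armnn::IWorkload& workload)
{
    workload.PostAllocationConfigure();  // generic post-allocation step in the execution flow
    workload.Execute();                  // runs the configured CL batch normalization layer
}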

◆ ReplaceInputTensorHandle()

void ReplaceInputTensorHandle ( ITensorHandle * tensorHandle,
unsigned int slot )
override virtual

Reimplemented from BaseWorkload< QueueDescriptor >.

Definition at line 127 of file ClBatchNormalizationFloatWorkload.cpp.

{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch (armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

References BaseWorkload< QueueDescriptor >::m_Data.
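The method follows a swap-and-revert pattern: the new handle is installed, Reconfigure() is attempted, and on UnimplementedException the original handle is restored before the exception is rethrown. A hedged usage sketch, assuming hypothetical workload and newInputHandle variables:

// Sketch of calling the replacement API; 'workload' and 'newInputHandle' are
// hypothetical. If the CL backend cannot reconfigure, the workload restores the
// previous handle and the exception reaches this catch block.
try
{
    workload.ReplaceInputTensorHandle(newInputHandle, /*slot=*/0);
}
catch (const armnn::UnimplementedException&)
{
    // Fall back, e.g. copy data into the originally allocated tensor instead.
}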

◆ ReplaceOutputTensorHandle()

void ReplaceOutputTensorHandle ( ITensorHandle * tensorHandle,
unsigned int slot )
override virtual

Reimplemented from BaseWorkload< QueueDescriptor >.

Definition at line 144 of file ClBatchNormalizationFloatWorkload.cpp.

{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch (armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

References BaseWorkload< QueueDescriptor >::m_Data.


The documentation for this class was generated from the following files:
ClBatchNormalizationFloatWorkload.hpp
ClBatchNormalizationFloatWorkload.cpp