ArmNN
 24.08
ClBatchNormalizationFloatWorkload.cpp
//
// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClBatchNormalizationFloatWorkload.hpp"
#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

arm_compute::Status ClBatchNormalizationValidate(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const TensorInfo& mean,
                                                 const TensorInfo& var,
                                                 const TensorInfo& beta,
                                                 const TensorInfo& gamma,
                                                 const BatchNormalizationDescriptor& descriptor,
                                                 const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::TensorInfo aclInputInfo =
        armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo =
        armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclMeanInfo =
        armcomputetensorutils::BuildArmComputeTensorInfo(mean, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclVarInfo =
        armcomputetensorutils::BuildArmComputeTensorInfo(var, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclBetaInfo =
        armcomputetensorutils::BuildArmComputeTensorInfo(beta, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclGammaInfo =
        armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.m_DataLayout);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
        activationDescriptor);

    return arm_compute::CLBatchNormalizationLayer::validate(&aclInputInfo,
                                                            &aclOutputInfo,
                                                            &aclMeanInfo,
                                                            &aclVarInfo,
                                                            &aclBetaInfo,
                                                            &aclGammaInfo,
                                                            descriptor.m_Eps,
                                                            activationInfo);
}

ClBatchNormalizationFloatWorkload::ClBatchNormalizationFloatWorkload(
    const BatchNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info,
    const arm_compute::CLCompileContext& clCompileContext)
    : FloatWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClBatchNormalizationWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    m_Mean = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo());

    m_Variance = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo());

    m_Gamma = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo());

    m_Beta = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo());

    m_Data.ValidateInputsOutputs("ClBatchNormalizationFloatWorkload", 1, 1);

    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_configure");
        m_Layer.configure(clCompileContext,
                          &input,
                          &output,
                          m_Mean.get(),
                          m_Variance.get(),
                          m_Beta.get(),
                          m_Gamma.get(),
                          m_Data.m_Parameters.m_Eps,
                          activationInfo);
    }

    InitializeArmComputeClTensorData(*m_Mean, m_Data.m_Mean);
    InitializeArmComputeClTensorData(*m_Variance, m_Data.m_Variance);
    InitializeArmComputeClTensorData(*m_Beta, m_Data.m_Beta);
    InitializeArmComputeClTensorData(*m_Gamma, m_Data.m_Gamma);

    // Force Compute Library to perform the necessary copying and reshaping, after which
    // the input tensors that are no longer needed can be freed.
    m_Layer.prepare();
    FreeUnusedTensors();
}

void ClBatchNormalizationFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_Execute");
    RunClFunction(m_Layer, CHECK_LOCATION());
}

void ClBatchNormalizationFloatWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_Mean);
    FreeTensorIfUnused(m_Variance);
    FreeTensorIfUnused(m_Gamma);
    FreeTensorIfUnused(m_Beta);
}

void ClBatchNormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

// Replace output tensor handle with the given TensorHandle
void ClBatchNormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
    this->m_Data.m_Outputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Outputs[slot] = backupHandle;
        throw e;
    }
}

void ClBatchNormalizationFloatWorkload::Reconfigure()
{
    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
}

} //namespace armnn
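
ClBatchNormalizationValidate wraps arm_compute::CLBatchNormalizationLayer::validate so callers can check support before the workload is built. The sketch below is a minimal, illustrative use of that helper for an NHWC Float32 tensor; the shapes, the epsilon value, and the CheckBatchNormSupport wrapper are assumptions for the example, not part of this file.

// Minimal sketch (assumed shapes and helper name), not part of ClBatchNormalizationFloatWorkload.cpp.
#include "ClBatchNormalizationFloatWorkload.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>

bool CheckBatchNormSupport()
{
    using namespace armnn;

    // One NHWC Float32 activation tensor of shape [N=1, H=16, W=16, C=8].
    TensorShape activationShape({1, 16, 16, 8});
    TensorInfo input(activationShape, DataType::Float32);
    TensorInfo output(activationShape, DataType::Float32);

    // Mean, variance, beta and gamma are per-channel vectors of length C.
    TensorShape channelShape({8});
    TensorInfo mean(channelShape, DataType::Float32);
    TensorInfo var(channelShape, DataType::Float32);
    TensorInfo beta(channelShape, DataType::Float32);
    TensorInfo gamma(channelShape, DataType::Float32);

    BatchNormalizationDescriptor descriptor;
    descriptor.m_Eps        = 0.0001f;           // added to the variance to avoid dividing by zero
    descriptor.m_DataLayout = DataLayout::NHWC;

    // No fused activation for this query, so the activation descriptor pointer is null.
    arm_compute::Status status =
        ClBatchNormalizationValidate(input, output, mean, var, beta, gamma, descriptor, nullptr);

    if (!status)
    {
        std::cerr << "CL batch normalization not supported: " << status.error_description() << std::endl;
        return false;
    }
    return true;
}

If the query succeeds, the CL backend can construct ClBatchNormalizationFloatWorkload with the same descriptor, and the constructor above configures CLBatchNormalizationLayer with the mean, variance, beta and gamma staging tensors.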