ArmNN 24.08: ClPooling2dWorkload.cpp
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClPooling2dWorkload.hpp"
#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include "ClWorkloadUtils.hpp"
namespace armnn
{
using namespace armcomputetensorutils;

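// Translates the ArmNN tensor infos and pooling descriptor into their Compute Library
// equivalents and asks CLPoolingLayer whether the configuration is supported. No OpenCL
// resources are allocated, so the CL backend can call this when checking layer support.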
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Pooling2dDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

    arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor);

    return arm_compute::CLPoolingLayer::validate(&aclInputInfo, &aclOutputInfo, layerInfo);
}

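// The constructor does the expensive setup once: it checks the queue descriptor, binds the
// Compute Library pooling function to the input and output CL tensors, and configures (and,
// where needed, compiles) the kernels through the supplied CLCompileContext. Execute() then
// only has to enqueue the prepared function.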
ClPooling2dWorkload::ClPooling2dWorkload(const Pooling2dQueueDescriptor& descriptor,
                                         const WorkloadInfo& info,
                                         const arm_compute::CLCompileContext& clCompileContext)
    : ClBaseWorkload<Pooling2dQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClPooling2dWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    m_Data.ValidateInputsOutputs("ClPooling2dWorkload", 1, 1);

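    // Get the Compute Library tensors that back the ArmNN input and output tensor handles.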
    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

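    // Propagate the data layout (NCHW or NHWC) from the ArmNN descriptor to the ACL tensor
    // infos so the pooling kernels interpret the dimensions correctly.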
    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    // Flag to use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy.
    // Enable fp_mixed_precision for the FP16 cases where accumulation reaches a limit
    // beyond which the value no longer increases.
    bool fpMixedPrecision = false;

    arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(m_Data.m_Parameters, fpMixedPrecision);

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling2dWorkload_configure");
        // Configure the pooling layer.
        m_PoolingLayer.configure(clCompileContext, &input, &output, layerInfo);
    }
}

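// Execute() runs the function that was configured in the constructor; RunClFunction wraps the
// call so that Compute Library errors are reported as ArmNN exceptions carrying the captured
// source location.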
void ClPooling2dWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClPooling2dWorkload_Execute");
    RunClFunction(m_PoolingLayer, CHECK_LOCATION());
}

} // namespace armnn
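
A minimal sketch of how the validate helper above is typically exercised before a workload is created. It is not part of the file: the shapes, descriptor values, and the CheckAvgPoolSupport wrapper are hypothetical, and the include paths assume the usual ArmNN layout.

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "ClPooling2dWorkload.hpp"

// Hypothetical helper: asks the CL backend whether a 2x2, stride-2 average pool is supported.
arm_compute::Status CheckAvgPoolSupport()
{
    // 1x16x32x32 input pooled down to 1x16x16x16, both NCHW Float32.
    const armnn::TensorInfo input({1, 16, 32, 32}, armnn::DataType::Float32);
    const armnn::TensorInfo output({1, 16, 16, 16}, armnn::DataType::Float32);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType   = armnn::PoolingAlgorithm::Average;
    desc.m_PoolWidth  = 2;
    desc.m_PoolHeight = 2;
    desc.m_StrideX    = 2;
    desc.m_StrideY    = 2;
    desc.m_DataLayout = armnn::DataLayout::NCHW;

    // Delegates to arm_compute::CLPoolingLayer::validate without allocating OpenCL resources.
    return armnn::ClPooling2dWorkloadValidate(input, output, desc);
}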