ArmNN 24.08
ClQuantizeWorkload.cpp
//
// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClQuantizeWorkload.hpp"
#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo& input,
                                               const TensorInfo& output)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    return arm_compute::CLQuantizationLayer::validate(&aclInputInfo,
                                                      &aclOutputInfo);
}

ClQuantizeWorkload::ClQuantizeWorkload(const QuantizeQueueDescriptor& descriptor,
                                       const WorkloadInfo& info,
                                       const arm_compute::CLCompileContext& clCompileContext)
    : ClBaseWorkload<QuantizeQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("ClQuantizeWorkload", 1, 1);

    arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizeWorkload_configure");
        m_Layer.configure(clCompileContext, &input, &output);
    }
}

void ClQuantizeWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClQuantizeWorkload_Execute");
    RunClFunction(m_Layer, CHECK_LOCATION());
}

} //namespace armnn
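
For context, the sketch below shows one way a caller might exercise the validate hook before building the layer. It is an illustrative sketch only, not part of ClQuantizeWorkload.cpp: the include paths, tensor shape, quantization parameters, and the helper CanQuantizeOnCl are assumptions made for the example.

// Illustrative sketch (hypothetical caller, not part of this file).
// Queries whether the CL backend would accept a Float32 -> QAsymmS8
// quantization for an assumed 1x3x224x224 tensor.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "ClQuantizeWorkload.hpp"   // assumed reachable on the include path

bool CanQuantizeOnCl()
{
    using namespace armnn;

    const TensorInfo input(TensorShape({1, 3, 224, 224}), DataType::Float32);

    TensorInfo output(TensorShape({1, 3, 224, 224}), DataType::QAsymmS8);
    output.SetQuantizationScale(0.05f);   // example quantization parameters
    output.SetQuantizationOffset(10);

    // ClQuantizeWorkloadValidate forwards to arm_compute::CLQuantizationLayer::validate,
    // so a failing status means the CL backend would reject this layer configuration.
    const arm_compute::Status status = ClQuantizeWorkloadValidate(input, output);
    return static_cast<bool>(status);
}

In the ArmNN tree this check is normally reached indirectly through the CL backend's layer-support queries (ClLayerSupport), which forward quantize-support requests to ClQuantizeWorkloadValidate; calling it directly as above is only for illustration.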