ArmNN
 24.08
NeonQuantizeWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "NeonQuantizeWorkload.hpp"
#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <neon/NeonTensorHandle.hpp>

#include <arm_compute/core/Types.h>
13 
14 namespace armnn
15 {
16 using namespace armcomputetensorutils;
17 
19 {
20  const arm_compute::TensorInfo neonInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
21  const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
22 
23  return arm_compute::NEQuantizationLayer::validate(&neonInputInfo, &neonOutputInfo);
24 }
25 
27  const WorkloadInfo& workloadInfo)
28  : NeonBaseWorkload<QuantizeQueueDescriptor>(descriptor, workloadInfo)
29 {
30  m_Data.ValidateInputsOutputs("NeonQuantizeWorkload", 1, 1);
31 
32  arm_compute::ITensor& input = PolymorphicPointerDowncast<IAclTensorHandle>(
33  m_Data.m_Inputs[0])->GetTensor();
34  arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>(
35  m_Data.m_Outputs[0])->GetTensor();
36 
37  m_Layer.reset(new arm_compute::NEQuantizationLayer());
38  m_Layer->configure(&input, &output);
39  m_Layer->prepare();
40 }
41 
43 {
44  if (m_Layer)
45  {
46  ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonQuantizeWorkload_Execute");
47  m_Layer->run();
48  }
49 }
50 
51 } // namespace armnn
armnn::QueueDescriptor::ValidateInputsOutputs
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Definition: WorkloadData.cpp:447
armnn::QuantizeQueueDescriptor
Definition: WorkloadData.hpp:299
armnn::TensorInfo
Definition: Tensor.hpp:152
NeonTensorHandle.hpp
NeonQuantizeWorkload.hpp
armnn::NeonQuantizeWorkload::Execute
void Execute() const override
Definition: NeonQuantizeWorkload.cpp:42
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
PolymorphicDowncast.hpp
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkloadData.hpp:27
armnn::NeonQuantizeWorkloadValidate
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonQuantizeWorkload.cpp:18
armnn::Status
Status
Definition: Types.hpp:42
armnn::BaseWorkload< QuantizeQueueDescriptor >::m_Data
QuantizeQueueDescriptor m_Data
Definition: Workload.hpp:89
armnn::NeonQuantizeWorkload::NeonQuantizeWorkload
NeonQuantizeWorkload(const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &workloadInfo)
Definition: NeonQuantizeWorkload.cpp:26
NeonWorkloadUtils.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
ArmComputeTensorUtils.hpp
ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
Definition: NeonWorkloadUtils.hpp:33
armnn::NeonBaseWorkload
Definition: NeonBaseWorkload.hpp:13
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkloadData.hpp:26