#include <arm_compute/core/Types.h>

using namespace armcomputetensorutils;

arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
    const arm_compute::TensorInfo neonInputInfo  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
    return arm_compute::NEQuantizationLayer::validate(&neonInputInfo, &neonOutputInfo); // ask ACL whether this configuration is supported
}

NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descriptor,
                                           const WorkloadInfo& workloadInfo)
    : NeonBaseWorkload<QuantizeQueueDescriptor>(descriptor, workloadInfo)
{
    m_Data.ValidateInputsOutputs("NeonQuantizeWorkload", 1, 1); // exactly one input and one output

    arm_compute::ITensor& input  = PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();

    // Create and configure the Arm Compute Library quantization function.
    m_Layer.reset(new arm_compute::NEQuantizationLayer());
    m_Layer->configure(&input, &output);
}
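For orientation, here is a sketch of the matching class declaration inferred from the members used above; treat the exact layout (and the unique_ptr member type) as an assumption rather than the actual NeonQuantizeWorkload.hpp.

// Sketch only: inferred from the usage above, not the verbatim ArmNN header.
class NeonQuantizeWorkload : public NeonBaseWorkload<QuantizeQueueDescriptor>
{
public:
    NeonQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& workloadInfo);
    void Execute() const override;

private:
    // Owns the configured Arm Compute Library function; mutable so the const
    // Execute() override can call the non-const run() method.
    mutable std::unique_ptr<arm_compute::NEQuantizationLayer> m_Layer;
};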
This workload source references the following declarations and macros:

#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
Creates a scoped profiling event that uses GetGuid() and GetName() from the calling class; a usage sketch follows this list.

QuantizeQueueDescriptor m_Data
The queue descriptor inherited from the base workload; its m_Inputs and m_Outputs carry the tensor handles consumed in the constructor.

NeonBaseWorkload(const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &info)
Base-class constructor that the NeonQuantizeWorkload constructor delegates to.

NeonQuantizeWorkload(const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &workloadInfo)
The constructor defined in the listing above.

void Execute() const override
Executes the configured workload at runtime; see the sketch below.
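A minimal usage sketch for the profiling macro inside Execute(), assuming the m_Layer member created in the constructor above; the event label string is illustrative rather than taken verbatim from the ArmNN sources.

void NeonQuantizeWorkload::Execute() const
{
    // Scoped profiling event carrying this workload's name and GUID;
    // the label string here is an assumed example.
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonQuantizeWorkload_Execute");

    // Run the NEQuantizationLayer configured in the constructor.
    m_Layer->run();
}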
auto PolymorphicPointerDowncast(const SourceType &value)
Polymorphic downcast for shared pointers and built-in (raw) pointers; an illustrative sketch follows.
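To make the behaviour concrete, here is a self-contained illustration using hypothetical stub types (TensorHandleStub, AclTensorHandleStub); the helper shown is a stand-in written for this sketch, not ArmNN's actual implementation, which additionally handles std::shared_ptr.

#include <cassert>
#include <memory>

// Illustrative stand-in: checked with dynamic_cast in debug builds,
// effectively a plain static_cast in release (NDEBUG) builds.
template <typename DestType, typename SourceType>
auto PolymorphicPointerDowncast(const SourceType& value)
{
    assert(dynamic_cast<DestType*>(value) == static_cast<DestType*>(value));
    return static_cast<DestType*>(value);
}

// Hypothetical stand-ins for the ITensorHandle / IAclTensorHandle hierarchy.
struct TensorHandleStub                        { virtual ~TensorHandleStub() = default; };
struct AclTensorHandleStub : TensorHandleStub  { int backingTensor = 42; };

int main()
{
    std::unique_ptr<TensorHandleStub> handle = std::make_unique<AclTensorHandleStub>();

    // Same call pattern as in the constructor above: downcast, then use the derived interface.
    auto* acl = PolymorphicPointerDowncast<AclTensorHandleStub>(handle.get());
    return acl->backingTensor == 42 ? 0 : 1;
}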
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Asks arm_compute::NEQuantizationLayer::validate whether the NEON backend supports quantizing the given input into the given output; defined in the listing above, with a hedged calling example below.

WorkloadInfo
Contains information about the TensorInfos of a layer.
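A hedged example of calling the validation entry point before constructing the workload; the include paths, tensor shapes, and quantization parameters below are illustrative assumptions rather than values taken from the ArmNN sources.

#include <armnn/Tensor.hpp>                         // armnn::TensorInfo, armnn::TensorShape, armnn::DataType
#include <neon/workloads/NeonQuantizeWorkload.hpp>  // assumed include path for the declaration

#include <iostream>
#include <string>

int main()
{
    // Illustrative tensors: Float32 activations quantized to QAsymmU8 with an assumed scale/offset.
    const armnn::TensorInfo inputInfo (armnn::TensorShape({1, 2, 2, 3}), armnn::DataType::Float32);
    const armnn::TensorInfo outputInfo(armnn::TensorShape({1, 2, 2, 3}), armnn::DataType::QAsymmU8,
                                       0.05f /*quantizationScale*/, 128 /*quantizationOffset*/);

    // Check backend support before paying the cost of building the workload.
    const arm_compute::Status status = armnn::NeonQuantizeWorkloadValidate(inputInfo, outputInfo);
    if (!status)
    {
        std::cerr << "NEON quantize not supported: " << status.error_description() << std::endl;
        return 1;
    }
    return 0;
}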