// Excerpt from the Neon quantize workload implementation (surrounding code omitted).
#include <arm_compute/core/Types.h>

using namespace armcomputetensorutils;

arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
    const arm_compute::TensorInfo neonInputInfo  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    return arm_compute::NEQuantizationLayer::validate(&neonInputInfo, &neonOutputInfo);
}

// In the workload constructor: fetch the backing ACL tensors, then configure the layer.
arm_compute::ITensor& input  = PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();

m_Layer.reset(new arm_compute::NEQuantizationLayer());
m_Layer->configure(&input, &output);
The following macro, functions, and data members are referenced by this workload:

#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
    Creates a scoped Neon profiling event that uses GetGuid() and GetName() from the calling class.
NeonQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& workloadInfo)
    Constructor: validates the descriptor and configures an arm_compute::NEQuantizationLayer for the given input and output tensors.

void Execute() const override
    Runs the configured quantization layer.
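The constructor builds and configures the layer, so Execute() only has to run it. Below is a minimal sketch of the Execute() body, assuming the m_Layer member from the excerpt above; the event label string and the if (m_Layer) guard are assumptions following the usual Arm NN Neon workload pattern, not quoted source.

// Sketch of an Execute() body (assumed pattern; the macro comes from the Neon workload utilities).
void NeonQuantizeWorkload::Execute() const
{
    if (m_Layer)
    {
        // Scoped profiling event; the macro uses GetGuid() and GetName() from this class.
        ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonQuantizeWorkload_Execute");
        m_Layer->run();
    }
}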
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
    Asks the Arm Compute Library whether NEQuantizationLayer supports the given input and output TensorInfos, without creating a workload.
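As a usage illustration, a caller can run this validate function against candidate TensorInfos before building the workload. The helper below and its include paths are assumptions for the sketch, not part of the Arm NN API; the status check mirrors how Arm NN layer-support code interprets an arm_compute::Status.

#include <armnn/Tensor.hpp>              // armnn::TensorInfo
#include "NeonQuantizeWorkload.hpp"      // declares NeonQuantizeWorkloadValidate (repo-internal header)

// Hypothetical helper: returns true when the Neon backend reports it can run
// a quantization with the given input/output descriptions.
bool CanQuantizeOnNeon(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
{
    const arm_compute::Status status = armnn::NeonQuantizeWorkloadValidate(input, output);
    return status.error_code() == arm_compute::ErrorCode::OK;
}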
std::vector<ITensorHandle*> m_Inputs
std::vector<ITensorHandle*> m_Outputs
    The input and output tensor handles held by the queue descriptor; the workload downcasts these to IAclTensorHandle to reach the underlying arm_compute::ITensor objects.

void ValidateInputsOutputs(const std::string& descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
    Checks that the descriptor holds the expected number of input and output tensor handles.

struct WorkloadInfo
    Contains information about the TensorInfos of a layer.
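To show how the queue descriptor and the WorkloadInfo relate: the descriptor carries the ITensorHandle pointers in m_Inputs and m_Outputs, while the WorkloadInfo carries the TensorInfos that describe them, index for index. The helper below is a hypothetical illustration (its name, the shape check, and the include paths are assumptions), not Arm NN code.

#include <armnn/Tensor.hpp>                 // armnn::TensorInfo, armnn::TensorShape
#include <armnn/backends/WorkloadData.hpp>  // armnn::QuantizeQueueDescriptor (path varies by Arm NN version)
#include <armnn/backends/WorkloadInfo.hpp>  // armnn::WorkloadInfo

// Hypothetical helper: validates the handle counts on the descriptor, then compares
// the shapes recorded in the WorkloadInfo (quantize is element-wise, so they should match).
bool QuantizeShapesMatch(const armnn::QuantizeQueueDescriptor& descriptor,
                         const armnn::WorkloadInfo& info)
{
    // Throws if the descriptor does not hold exactly one input and one output handle.
    descriptor.ValidateInputsOutputs("NeonQuantizeWorkload", 1, 1);

    // The TensorInfos in WorkloadInfo describe the handles in m_Inputs / m_Outputs.
    const armnn::TensorInfo& inputInfo  = info.m_InputTensorInfos[0];
    const armnn::TensorInfo& outputInfo = info.m_OutputTensorInfos[0];

    return inputInfo.GetShape() == outputInfo.GetShape();
}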