// Fragment (extraction-garbled): per-element quantization helper.
// Iterates over numValues elements, reading through the float Decoder `in`
// and writing through the float Encoder `out`. The loop body is not visible
// in this extract -- presumably each iteration does out.Set(in.Get());
// TODO confirm against the full source file.
19 void QuantizeImpl(Decoder<float>& in, Encoder<float>& out,
size_t numValues)
21 for (
unsigned int i = 0; i < numValues; i++)
// Fragment of the RefQuantizeWorkload constructor's member-initializer list:
// caches the element count of the first input tensor (from the WorkloadInfo)
// so Execute() can pass it to QuantizeImpl without re-querying tensor info.
33 , m_NumElements(
info.m_InputTensorInfos[0].GetNumElements())
// Body of RefQuantizeWorkload::Execute() (extraction-garbled fragment):
// 1) build a float Decoder over the first input tensor's mapped memory,
// 2) build a float Encoder over the first output tensor's mapped memory,
// 3) run the element-wise quantization loop over the cached element count.
// NOTE(review): `inputs`/`outputs` are not declared in this fragment --
// presumably the workload's input/output tensor handles; confirm against
// the enclosing function signature in the full source.
46 std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(
GetTensorInfo(inputs[0]), inputs[0]->
Map());
47 std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(
GetTensorInfo(outputs[0]), outputs[0]->
Map());
49 QuantizeImpl(*inputDecoder, *outputEncoder, m_NumElements);
#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
RefQuantizeWorkload(const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &info)
void Execute() const override
Copyright (c) 2021 ARM Limited and Contributors.
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
std::vector< ITensorHandle * > m_Inputs
std::vector< ITensorHandle * > m_Outputs
Contains information about the TensorInfos of a layer.