void QuantizeImpl(Decoder<float>& in, Encoder<float>& out, size_t numValues)
{
    for (unsigned int i = 0; i < numValues; i++)
    {
        in[i];              // position the decoder at element i
        out[i];             // position the encoder at element i
        out.Set(in.Get());  // read the float value, write it back quantized
    }
}
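The loop only moves values; the numeric conversion happens inside the Encoder that Execute() picks for the output tensor. As a rough, self-contained illustration of that per-element mapping for an asymmetric 8-bit output (the helper name and the fixed [0, 255] range are assumptions for this sketch, not Arm NN API), each value v becomes q = clamp(round(v / scale) + offset, 0, 255):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical helper mirroring what an asymmetric 8-bit encoder does per element.
uint8_t QuantizeToQAsymmU8(float value, float scale, int32_t offset)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    return static_cast<uint8_t>(std::clamp(q, 0, 255));   // saturate to the representable range
}

For example, with scale 0.1f and offset 128, the input 0.05f maps to round(0.5) + 128 = 129.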
// Constructor initializer list (the enclosing signature appears in the member list below):
    , m_NumElements(info.m_InputTensorInfos[0].GetNumElements())
void RefQuantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    // Map the tensors and wrap the raw memory in type-erased float accessors
    // chosen from each tensor's TensorInfo.
    std::unique_ptr<Decoder<float>> inputDecoder  = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());

    // Quantize every element of the input into the output.
    QuantizeImpl(*inputDecoder, *outputEncoder, m_NumElements);
}
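MakeDecoder<float> and MakeEncoder<float> inspect each tensor's TensorInfo and wrap the Map()'d memory in an accessor for the matching data type, which is what lets QuantizeImpl stay ignorant of how the output is stored. A minimal stand-in for that pattern (all class and function names below are invented for the sketch; they are not the Arm NN types):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

// Invented stand-in for Encoder<float>: a type-erased "write one float" interface.
struct EncoderBase
{
    virtual ~EncoderBase() = default;
    virtual void Set(std::size_t index, float value) = 0;
};

// Invented stand-in for an asymmetric 8-bit encoder.
struct U8Encoder : EncoderBase
{
    U8Encoder(uint8_t* data, float scale, int32_t offset)
        : m_Data(data), m_Scale(scale), m_Offset(offset) {}

    void Set(std::size_t index, float value) override
    {
        // Same affine mapping as the helper shown earlier.
        int32_t q = static_cast<int32_t>(std::round(value / m_Scale)) + m_Offset;
        m_Data[index] = static_cast<uint8_t>(std::clamp(q, 0, 255));
    }

    uint8_t* m_Data;
    float    m_Scale;
    int32_t  m_Offset;
};

// Invented stand-in for MakeEncoder<float>: hide the storage type behind the interface.
std::unique_ptr<EncoderBase> MakeU8Encoder(void* mappedMemory, float scale, int32_t offset)
{
    return std::make_unique<U8Encoder>(static_cast<uint8_t*>(mappedMemory), scale, offset);
}

int main()
{
    std::vector<float>   input  = { -1.0f, 0.0f, 0.05f, 1.0f };
    std::vector<uint8_t> output(input.size());

    auto encoder = MakeU8Encoder(output.data(), /*scale=*/0.1f, /*offset=*/128);

    // Same per-element pattern as QuantizeImpl: read a float, write it quantized.
    for (std::size_t i = 0; i < input.size(); ++i)
    {
        encoder->Set(i, input[i]);
    }

    for (uint8_t q : output) { std::cout << static_cast<int>(q) << ' '; }  // prints: 118 128 129 138
    std::cout << '\n';
    return 0;
}

In the workload itself that choice is driven by the output tensor's TensorInfo rather than being hard-coded, so the same Execute path serves whichever quantized output type the reference backend selects.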
Member functions documented here:

RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& info)
void Execute() const override
void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override

Referenced data members and helpers:

QuantizeQueueDescriptor m_Data
std::vector<ITensorHandle*> m_Inputs
std::vector<ITensorHandle*> m_Outputs
const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)

WorkloadInfo contains information about inputs and outputs to a layer.
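Taken together, the members listed above suggest how the two entry points share one implementation: the synchronous Execute() uses the tensor handles captured in m_Data, while ExecuteAsync() takes them from the caller-supplied WorkingMemDescriptor, and both forward to the common Execute(inputs, outputs) shown earlier. A hedged reconstruction of that delegation (inferred from the listed members, not quoted from the sources):

void RefQuantizeWorkload::Execute() const
{
    // Handles stored in the queue descriptor at construction time.
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefQuantizeWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
    // Handles owned by the caller's working-memory descriptor.
    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}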