// Profiling helper for the CpuRef backend: opens a scoped wall-clock profiling
// event named "<workload name>_<label>" (via this->GetName(), so it must be
// expanded inside a workload member function).
// NOTE(review): this chunk is extraction-mangled — the leading numbers are the
// original file's line numbers fused into the text, and an intermediate macro
// continuation line (original line 24, likely a GUID argument given the macro
// name) appears to be missing. Verify against the original header before use.
22 #define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label) \
23 ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuRef, \
25 this->GetName() + "_" + label, \
26 armnn::WallClockTimer())
32 template <
typename TensorHandleType = RefTensorHandle>
36 const TensorHandleType* refTensorHandle =
37 PolymorphicDowncast<const TensorHandleType*>(tensorHandle);
38 return refTensorHandle->GetTensorInfo();
41 template <
typename DataType,
typename PayloadType>
45 return reinterpret_cast<const DataType*
>(tensorHandle->
Map());
/// Map output tensor `idx` of a workload's queue descriptor and return a typed,
/// writable pointer to its data.
template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    return reinterpret_cast<DataType*>(data.m_Outputs[idx]->Map());
}
55 template <
typename DataType>
58 return reinterpret_cast<DataType*
>(tensorHandle->
Map());
61 template <
typename PayloadType>
64 return GetInputTensorData<float>(idx, data);
67 template <
typename PayloadType>
70 return GetOutputTensorData<float>(idx, data);
73 template <
typename PayloadType>
76 return GetInputTensorData<Half>(idx, data);
79 template <
typename PayloadType>
82 return GetOutputTensorData<Half>(idx, data);
85 template <
typename PayloadType>
88 return GetInputTensorData<BFloat16>(idx, data);
91 template <
typename PayloadType>
94 return GetOutputTensorData<BFloat16>(idx, data);
104 std::vector<float> ret(
info.GetNumElements());
105 for (
size_t i = 0; i <
info.GetNumElements(); i++)
115 for (
unsigned int i = 0; i <
info.GetNumElements(); i++)
117 outputData[i] = Dequantize<T>(inputData[i],
info.GetQuantizationScale(),
info.GetQuantizationOffset());
123 for (
size_t i = 0; i <
info.GetNumElements(); i++)
125 quant[i] = armnn::Quantize<uint8_t>(dequant[i],
info.GetQuantizationScale(),
info.GetQuantizationOffset());
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
Copyright (c) 2021 ARM Limited and Contributors.
const Half * GetInputTensorDataHalf(unsigned int idx, const PayloadType &data)
const DataType * GetInputTensorData(unsigned int idx, const PayloadType &data)
const float * GetInputTensorDataFloat(unsigned int idx, const PayloadType &data)
float Dequantize(QuantizedType value, float scale, int32_t offset)
Dequantize an 8-bit data type into a floating point data type.
BFloat16 * GetOutputTensorDataBFloat16(unsigned int idx, const PayloadType &data)
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
QuantizedType Quantize(float value, float scale, int32_t offset)
Quantize a floating point data type into an 8-bit data type.
float * GetOutputTensorDataFloat(unsigned int idx, const PayloadType &data)
Half * GetOutputTensorDataHalf(unsigned int idx, const PayloadType &data)
const BFloat16 * GetInputTensorDataBFloat16(unsigned int idx, const PayloadType &data)
DataType * GetOutputTensorData(unsigned int idx, const PayloadType &data)