/// Opens a scoped wall-clock profiling event for the reference (CpuRef) backend,
/// naming the event after the current workload (this->GetName()) suffixed with
/// 'label'. Must be used inside a member function that provides GetName().
/// NOTE(review): the second macro argument line was missing from the garbled
/// source (embedded numbering jumps 23 -> 25); restored as this->GetGuid() to
/// match the "_NAME_GUID" macro name -- confirm against the original header.
#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label) \
ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuRef, \
                                              this->GetGuid(), \
                                              this->GetName() + "_" + label, \
                                              armnn::WallClockTimer())
32 template <
typename TensorHandleType = RefTensorHandle>
36 const TensorHandleType* refTensorHandle =
37 PolymorphicDowncast<const TensorHandleType*>(tensorHandle);
38 return refTensorHandle->GetTensorInfo();
41 template <
typename DataType,
typename PayloadType>
45 return reinterpret_cast<const DataType*
>(tensorHandle->
Map());
48 template <
typename DataType,
typename PayloadType>
51 ITensorHandle* tensorHandle = data.m_Outputs[idx];
52 return reinterpret_cast<DataType*
>(tensorHandle->Map());
55 template <
typename DataType>
58 return reinterpret_cast<DataType*
>(tensorHandle->
Map());
61 template <
typename PayloadType>
64 return GetInputTensorData<float>(idx, data);
67 template <
typename PayloadType>
70 return GetOutputTensorData<float>(idx, data);
73 template <
typename PayloadType>
76 return GetInputTensorData<Half>(idx, data);
79 template <
typename PayloadType>
82 return GetOutputTensorData<Half>(idx, data);
85 template <
typename PayloadType>
88 return GetInputTensorData<BFloat16>(idx, data);
91 template <
typename PayloadType>
94 return GetOutputTensorData<BFloat16>(idx, data);
104 std::vector<float> ret(
info.GetNumElements());
105 for (
size_t i = 0; i <
info.GetNumElements(); i++)
115 for (
unsigned int i = 0; i <
info.GetNumElements(); i++)
117 outputData[i] = Dequantize<T>(inputData[i],
info.GetQuantizationScale(),
info.GetQuantizationOffset());
123 for (
size_t i = 0; i <
info.GetNumElements(); i++)
125 quant[i] = armnn::Quantize<uint8_t>(dequant[i],
info.GetQuantizationScale(),
info.GetQuantizationOffset());