22#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label) \
23ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuRef, \
25 this->GetName() + "_" + label, \
26 armnn::WallClockTimer())
32template <
typename TensorHandleType = RefTensorHandle>
36 const TensorHandleType* refTensorHandle =
38 return refTensorHandle->GetTensorInfo();
41template <
typename DataType,
typename PayloadType>
45 return reinterpret_cast<const DataType*
>(tensorHandle->
Map());
48template <
typename DataType,
typename PayloadType>
51 ITensorHandle* tensorHandle = data.m_Outputs[idx];
52 return reinterpret_cast<DataType*
>(tensorHandle->Map());
55template <
typename DataType>
58 return reinterpret_cast<DataType*
>(tensorHandle->
Map());
61template <
typename PayloadType>
67template <
typename PayloadType>
73template <
typename PayloadType>
79template <
typename PayloadType>
85template <
typename PayloadType>
91template <
typename PayloadType>
104 std::vector<float> ret(
info.GetNumElements());
105 for (
size_t i = 0; i <
info.GetNumElements(); i++)
115 for (
unsigned int i = 0; i <
info.GetNumElements(); i++)
117 outputData[i] =
Dequantize<T>(inputData[i],
info.GetQuantizationScale(),
info.GetQuantizationOffset());
123 for (
size_t i = 0; i <
info.GetNumElements(); i++)
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
Copyright (c) 2021 ARM Limited and Contributors.
Half * GetOutputTensorDataHalf(unsigned int idx, const PayloadType &data)
const DataType * GetInputTensorData(unsigned int idx, const PayloadType &data)
const BFloat16 * GetInputTensorDataBFloat16(unsigned int idx, const PayloadType &data)
BFloat16 * GetOutputTensorDataBFloat16(unsigned int idx, const PayloadType &data)
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for built-in pointers only.
const float * GetInputTensorDataFloat(unsigned int idx, const PayloadType &data)
const Half * GetInputTensorDataHalf(unsigned int idx, const PayloadType &data)
DataType * GetOutputTensorData(unsigned int idx, const PayloadType &data)
float * GetOutputTensorDataFloat(unsigned int idx, const PayloadType &data)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)