Arm NN 24.02 — Doxygen source listing for SampleDynamicAdditionWorkload.cpp. Go to the documentation of this file.
25 return reinterpret_cast<const float*
>(tensorHandle->
Map());
31 return reinterpret_cast<float*
>(tensorHandle->
Map());
36 : BaseWorkload(descriptor, info)
42 unsigned int num = info.GetNumElements();
48 for (
unsigned int i = 0; i < num; ++i)
50 outputData[i] = inputData0[i] + inputData1[i];
void Execute() const override
const armnn::TensorInfo & GetTensorInfo(const armnn::ITensorHandle *tensorHandle)
SampleDynamicAdditionWorkload(const armnn::AdditionQueueDescriptor &descriptor, const armnn::WorkloadInfo &info)
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Outputs
armnn::AdditionQueueDescriptor m_Data
const armnn::TensorInfo & GetTensorInfo() const
const float * GetInputTensorData(unsigned int idx, const armnn::AdditionQueueDescriptor &data)
std::vector< ITensorHandle * > m_Inputs
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
float * GetOutputTensorData(unsigned int idx, const armnn::AdditionQueueDescriptor &data)