#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  armnn::EmptyOptional(), \
                                                  name, \
                                                  armnn::WallClockTimer())
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  guid, \
                                                  GetName() + "_" + name, \
                                                  armnn::WallClockTimer())
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  this->GetGuid(), \
                                                  this->GetName() + "_" + label, \
                                                  armnn::WallClockTimer())
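
// Illustrative sketch (not part of the original header): a workload's Execute()
// typically opens one of these scoped events so the ACL kernel run is timed
// against the CpuAcc backend. The workload and member names are hypothetical.
//
//     void NeonExampleWorkload::Execute() const
//     {
//         ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("Execute");
//         m_ExampleLayer.run();   // the configured arm_compute function
//     }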
using namespace armnn::armcomputetensorutils;
inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
{
    switch (convolutionMethod)
    {
        case arm_compute::ConvolutionMethod::FFT:      return "FFT";
        case arm_compute::ConvolutionMethod::DIRECT:   return "Direct";
        case arm_compute::ConvolutionMethod::GEMM:     return "GEMM";
        case arm_compute::ConvolutionMethod::WINOGRAD: return "Winograd";
        default:                                       return "Unknown";
    }
}
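
// Illustrative sketch (not in the original header): the convolution workload can
// ask ACL which algorithm it selected and report it through the helper above.
// All variable names here are hypothetical.
//
//     arm_compute::ConvolutionMethod method =
//         arm_compute::NEConvolutionLayer::get_convolution_method(inputInfo,
//                                                                 weightsInfo,
//                                                                 outputInfo,
//                                                                 padStrideInfo);
//     std::string methodName = GetConvolutionMethodString(method);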
template <typename T>
void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
{
    // Allocate the ACL tensor's backing memory, then copy the source data into it.
    InitialiseArmComputeTensorEmpty(dstTensor);
    CopyArmComputeITensorData(srcData, dstTensor);
}
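
// Illustrative sketch (not in the original header): InitializeArmComputeTensorData()
// switches on the tensor's DataType and calls the helper above with a matching
// element type; the Float32 case is representative.
//
//     case DataType::Float32:
//         CopyArmComputeTensorData(tensor, reinterpret_cast<const float*>(handle->Map()));
//         break;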
        // Any data type not handled above is rejected rather than copied with the
        // wrong element size.
        default:
            throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
        // The same guard ends the data-type switch in the other
        // InitializeArmComputeTensorData() overload.
        default:
            throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
                                    const std::vector<int>& m_end,
                                    const std::vector<int>& m_stride)
{
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;
    arm_compute::Coordinates strides;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    // The Compute Library expects its coordinates in reverse (innermost-first) order,
    // so each ArmNN dimension is written to the mirrored index.
    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_end[revertedIndex]));
        strides.set(i, static_cast<int>(m_stride[revertedIndex]));
    }

    return std::make_tuple(starts, ends, strides);
}
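
// Illustrative call sketch (not in the original header): the begin/end/stride
// vectors of a StridedSliceDescriptor map onto the three Coordinates, which then
// feed arm_compute::NEStridedSlice::configure(). The descriptor variable is
// hypothetical.
//
//     auto [starts, ends, strides] = SetNeonStridedSliceData(descriptor.m_Begin,
//                                                            descriptor.m_End,
//                                                            descriptor.m_Stride);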
inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
                             const std::vector<unsigned int>& m_size)
{
    // Translate the begin/size pair into the begin/end coordinates expected by ACL.
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
    }

    return std::make_tuple(starts, ends);
}
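
// Worked example (assumption, for illustration): with m_begin = {0, 1, 2} and
// m_size = {4, 2, 3} the loop above produces
//     starts = (2, 1, 0)
//     ends   = (5, 3, 4)
// i.e. dimension 0 of the ArmNN vectors lands in the last ACL coordinate, and each
// end is simply begin + size for the corresponding dimension.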
template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}
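
// Illustrative sketch (not in the original header): a workload that needs raw CPU
// access to an output maps it through the template above; m_Data is the workload's
// queue descriptor, and DataType must match the tensor's actual element type.
//
//     float* outputData = GetOutputTensorData<float>(0, m_Data);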