#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  armnn::EmptyOptional(), \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  guid, \
                                                  GetName() + "_" + name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  this->GetGuid(), \
                                                  this->GetName() + "_" + label, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())
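// Illustrative only, not part of the original header: a minimal sketch of how a NEON
// workload step is wrapped in the plain profiling macro above (ExampleProfiledStep is a
// hypothetical function name). The _GUID/_NAME_GUID variants are meant for workload
// member functions, where GetName()/GetGuid() are available on `this`.
inline void ExampleProfiledStep()
{
    // Opens a scoped CpuAcc profiling event instrumented with both a NEON timer and a
    // wall-clock timer; the event closes automatically when the scope is left.
    ARMNN_SCOPED_PROFILING_EVENT_NEON("ExampleProfiledStep");
}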
using namespace armnn::armcomputetensorutils;
    // Maps an arm_compute::ConvolutionMethod to a human-readable string.
    switch (convolutionMethod)
    {
        case arm_compute::ConvolutionMethod::FFT:      return "FFT";
        case arm_compute::ConvolutionMethod::DIRECT:   return "Direct";
        case arm_compute::ConvolutionMethod::GEMM:     return "GEMM";
        case arm_compute::ConvolutionMethod::WINOGRAD: return "Winograd";
        default:                                       return "Unknown";
    }
    // Allocate the destination ACL tensor, then copy the source data into it.
    InitialiseArmComputeTensorEmpty(dstTensor);
    CopyArmComputeITensorData(srcData, dstTensor);
            throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
            throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
                                    const std::vector<int>& m_end,
                                    const std::vector<int>& m_stride)
{
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;
    arm_compute::Coordinates strides;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    // ACL coordinates run from the innermost dimension outwards, so the begin/end/stride
    // values are copied in reverse dimension order.
    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_end[revertedIndex]));
        strides.set(i, static_cast<int>(m_stride[revertedIndex]));
    }

    return std::make_tuple(starts, ends, strides);
}
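// Illustrative only, not part of the original header (hypothetical function name): shows
// the dimension reversal performed by SetNeonStridedSliceData. For begin = {0, 1},
// end = {2, 4}, stride = {1, 1}, the returned coordinates are starts = (1, 0),
// ends = (4, 2), strides = (1, 1).
inline auto ExampleStridedSliceCoordinates()
{
    return SetNeonStridedSliceData({0, 1}, {2, 4}, {1, 1});
}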
inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
                             const std::vector<unsigned int>& m_size)
{
    // The Slice layer is described by a begin and a size per dimension; ACL expects
    // begin and end coordinates, so end = begin + size (implicit stride of 1).
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
    }

    return std::make_tuple(starts, ends);
}
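// Illustrative only, not part of the original header (hypothetical function name): shows
// the begin/size to begin/end translation of SetNeonSliceData. For begin = {0, 1} and
// size = {2, 3}, end = begin + size per dimension, and after reversal the result is
// starts = (1, 0), ends = (4, 2).
inline auto ExampleSliceCoordinates()
{
    return SetNeonSliceData({0, 1}, {2, 3});
}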
template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}
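// Illustrative only, not part of the original header: a minimal sketch of how
// GetOutputTensorData is typically used from a workload, assuming a payload type that
// exposes an m_Outputs vector of ITensorHandle pointers (ExampleWriteFirstOutput is a
// hypothetical helper).
template <typename PayloadType>
void ExampleWriteFirstOutput(const PayloadType& data, float value)
{
    float* outputData = GetOutputTensorData<float>(0, data);
    outputData[0] = value;        // write through the mapped pointer
    data.m_Outputs[0]->Unmap();   // release the mapping once writing is done
}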