ArmNN 24.08 — Doxygen source listing of NeonWorkloadUtils.hpp
(See the documentation page for this file for cross-referenced symbols.)
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <armnn/Exceptions.hpp>
11 #include <neon/NeonTimer.hpp>
13 
14 #include <armnn/Utils.hpp>
15 
16 #include <Half.hpp>
17 
/// Creates a scoped profiling event for a Neon (CpuAcc) workload with no guid,
/// instrumented with both a NeonTimer and a WallClockTimer.
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  armnn::EmptyOptional(), \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())
24 
/// Creates a scoped profiling event for a Neon (CpuAcc) workload with an explicit guid.
/// NOTE: the expansion calls GetName(), so this macro must be expanded in a scope
/// (typically a workload member function) where GetName() is callable.
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  guid, \
                                                  GetName() + "_" + name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())
31 
/// Creates a scoped profiling event that uses this->GetGuid() and this->GetName()
/// from the calling class, appending "_" + label to the name. Must be expanded
/// inside a non-static member function of a class providing both accessors.
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  this->GetGuid(), \
                                                  this->GetName() + "_" + label, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())
39 
40 using namespace armnn::armcomputetensorutils;
41 
42 namespace armnn
43 {
44 
45 inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
46 {
47  switch (convolutionMethod)
48  {
49  case arm_compute::ConvolutionMethod::FFT:
50  return "FFT";
51  case arm_compute::ConvolutionMethod::DIRECT:
52  return "Direct";
53  case arm_compute::ConvolutionMethod::GEMM:
54  return "GEMM";
55  case arm_compute::ConvolutionMethod::WINOGRAD:
56  return "Winograd";
57  default:
58  return "Unknown";
59  }
60 }
61 
/// Prepares @p dstTensor via InitialiseArmComputeTensorEmpty (presumably allocates
/// its backing storage — see ArmComputeTensorUtils) and then copies @p srcData into it.
/// @tparam T element type of the source buffer; must match the tensor's data type.
/// @param dstTensor destination ACL tensor.
/// @param srcData   pointer to the source elements; assumed to hold at least as many
///                  elements as @p dstTensor — TODO confirm callers guarantee this.
template <typename T>
void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
{
    InitialiseArmComputeTensorEmpty(dstTensor);
    CopyArmComputeITensorData(srcData, dstTensor);
}
68 
69 inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
70  TensorInfo tensorInfo,
71  const ITensorHandle* handle)
72 {
73  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
74  switch(tensorInfo.GetDataType())
75  {
76  case DataType::Float16:
77  CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::Half*>(handle->Map()));
78  break;
79  case DataType::Float32:
80  CopyArmComputeTensorData(tensor, reinterpret_cast<const float*>(handle->Map()));
81  break;
82  case DataType::QAsymmU8:
83  CopyArmComputeTensorData(tensor, reinterpret_cast<const uint8_t*>(handle->Map()));
84  break;
85  case DataType::QSymmS8:
86  case DataType::QAsymmS8:
87  CopyArmComputeTensorData(tensor, reinterpret_cast<const int8_t*>(handle->Map()));
88  break;
89  case DataType::Signed32:
90  CopyArmComputeTensorData(tensor, reinterpret_cast<const int32_t*>(handle->Map()));
91  break;
92  case DataType::QSymmS16:
93  CopyArmComputeTensorData(tensor, reinterpret_cast<const int16_t*>(handle->Map()));
94  break;
95  case DataType::BFloat16:
96  CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::BFloat16*>(handle->Map()));
97  break;
98  default:
99  // Throw exception; assertion not called in release build.
100  throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
101  }
102 };
103 
104 inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
105  const ConstTensorHandle* handle)
106 {
107  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
108  switch(handle->GetTensorInfo().GetDataType())
109  {
110  case DataType::Float16:
112  break;
113  case DataType::Float32:
114  CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
115  break;
116  case DataType::QAsymmU8:
117  CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
118  break;
119  case DataType::QSymmS8:
120  case DataType::QAsymmS8:
121  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
122  break;
123  case DataType::Signed32:
124  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
125  break;
126  case DataType::QSymmS16:
127  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int16_t>());
128  break;
129  case DataType::BFloat16:
131  break;
132  default:
133  // Throw exception; assertion not called in release build.
134  throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
135  }
136 };
137 
138 inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
139  const std::vector<int>& m_end,
140  const std::vector<int>& m_stride)
141 {
144  arm_compute::Coordinates strides;
145 
146  unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
147 
148  for (unsigned int i = 0; i < num_dims; i++)
149  {
150  unsigned int revertedIndex = num_dims - i - 1;
151 
152  starts.set(i, static_cast<int>(m_begin[revertedIndex]));
153  ends.set(i, static_cast<int>(m_end[revertedIndex]));
154  strides.set(i, static_cast<int>(m_stride[revertedIndex]));
155  }
156 
157  return std::make_tuple(starts, ends, strides);
158 }
159 
160 inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
161  const std::vector<unsigned int>& m_size)
162 {
163  // This function must translate the size vector given to an end vector
164  // expected by the ACL NESlice workload
167 
168  unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
169 
170  // For strided slices, we have the relationship size = (end - begin) / stride
171  // For slice, we assume stride to be a vector of all ones, yielding the formula
172  // size = (end - begin) therefore we know end = size + begin
173  for (unsigned int i = 0; i < num_dims; i++)
174  {
175  unsigned int revertedIndex = num_dims - i - 1;
176 
177  starts.set(i, static_cast<int>(m_begin[revertedIndex]));
178  ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
179  }
180 
181  return std::make_tuple(starts, ends);
182 }
183 
184 template <typename DataType, typename PayloadType>
185 DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
186 {
187  ITensorHandle* tensorHandle = data.m_Outputs[idx];
188  return reinterpret_cast<DataType*>(tensorHandle->Map());
189 }
190 
191 } //namespace armnn
armnn::ConstTensorHandle
Definition: TensorHandle.hpp:24
armnn::GetOutputTensorData
DataType * GetOutputTensorData(unsigned int idx, const PayloadType &data)
Definition: ClWorkloadUtils.hpp:180
armnn::TensorInfo
Definition: Tensor.hpp:152
NeonTensorHandle.hpp
armnn::SetNeonStridedSliceData
auto SetNeonStridedSliceData(const std::vector< int > &m_begin, const std::vector< int > &m_end, const std::vector< int > &m_stride)
Definition: NeonWorkloadUtils.hpp:138
armnn::DataType::Float32
@ Float32
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::ConstTensorHandle::GetTensorInfo
const TensorInfo & GetTensorInfo() const
Definition: TensorHandle.hpp:40
armnn::DataType::QSymmS8
@ QSymmS8
armnn::Half
half_float::half Half
Definition: Half.hpp:22
armnn::Coordinates
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
Definition: InternalTypes.hpp:15
armnn::DataType::QSymmS16
@ QSymmS16
armnn::DataType::BFloat16
@ BFloat16
armnn::InitializeArmComputeTensorData
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, TensorInfo tensorInfo, const ITensorHandle *handle)
Definition: NeonWorkloadUtils.hpp:69
armnn::ConstTensorHandle::GetConstTensor
const T * GetConstTensor() const
Definition: TensorHandle.hpp:28
armnn::DataType::Float16
@ Float16
Utils.hpp
armnn::DataType
DataType
Definition: Types.hpp:48
Workload.hpp
armnn::GetConvolutionMethodString
std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod &convolutionMethod)
Definition: ClWorkloadUtils.hpp:46
armnn::SetNeonSliceData
auto SetNeonSliceData(const std::vector< unsigned int > &m_begin, const std::vector< unsigned int > &m_size)
Definition: NeonWorkloadUtils.hpp:160
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::DataType::Signed32
@ Signed32
armnn::DataType::QAsymmS8
@ QAsymmS8
Half.hpp
TensorHandle.hpp
armnn::BFloat16
Definition: BFloat16.hpp:15
Exceptions.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
ArmComputeTensorUtils.hpp
NeonTimer.hpp
armnn::CopyArmComputeTensorData
void CopyArmComputeTensorData(arm_compute::Tensor &dstTensor, const T *srcData)
Definition: NeonWorkloadUtils.hpp:63
armnn::ITensorHandle::Map
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
ARMNN_THROW_INVALIDARG_MSG_IF_FALSE
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
Definition: Exceptions.hpp:210