ArmNN
 24.08
NeonTensorHandle.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
#include <BFloat16.hpp>
#include <Half.hpp>

#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/Exceptions.hpp>

#include <arm_compute/runtime/MemoryGroup.h>
#include <arm_compute/runtime/IMemoryGroup.h>
#include <arm_compute/runtime/Tensor.h>
#include <arm_compute/runtime/SubTensor.h>
#include <arm_compute/core/TensorShape.h>
#include <arm_compute/core/Coordinates.h>
#include "armnn/TypesUtils.hpp"
23 
24 namespace armnn
25 {
26 class NeonTensorHandleDecorator;
27 
29 {
30 public:
31  NeonTensorHandle(const TensorInfo& tensorInfo)
32  : m_ImportFlags(static_cast<MemorySourceFlags>(MemorySource::Malloc)),
33  m_Imported(false),
34  m_IsImportEnabled(false),
35  m_TypeAlignment(GetDataTypeSize(tensorInfo.GetDataType()))
36  {
37  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
38  }
39 
40  NeonTensorHandle(const TensorInfo& tensorInfo,
41  DataLayout dataLayout,
42  MemorySourceFlags importFlags = static_cast<MemorySourceFlags>(MemorySource::Malloc))
43  : m_ImportFlags(importFlags),
44  m_Imported(false),
45  m_IsImportEnabled(false),
46  m_TypeAlignment(GetDataTypeSize(tensorInfo.GetDataType()))
47 
48 
49  {
50  armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
51  }
52 
53  arm_compute::ITensor& GetTensor() override { return m_Tensor; }
54  arm_compute::ITensor const& GetTensor() const override { return m_Tensor; }
55 
56  virtual void Allocate() override
57  {
58  // If we have enabled Importing, don't Allocate the tensor
59  if (!m_IsImportEnabled)
60  {
61  armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
62  }
63  };
64 
65  virtual void Manage() override
66  {
67  // If we have enabled Importing, don't manage the tensor
68  if (!m_IsImportEnabled)
69  {
70  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_MemoryGroup, "arm_compute::MemoryGroup is null.");
71  m_MemoryGroup->manage(&m_Tensor);
72  }
73  }
74 
75  virtual ITensorHandle* GetParent() const override { return nullptr; }
76 
77  virtual arm_compute::DataType GetDataType() const override
78  {
79  return m_Tensor.info()->data_type();
80  }
81 
82  virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) override
83  {
84  m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
85  }
86 
87  virtual const void* Map(bool /* blocking = true */) const override
88  {
89  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
90  }
91 
92  virtual void Unmap() const override {}
93 
94  TensorShape GetStrides() const override
95  {
96  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
97  }
98 
99  TensorShape GetShape() const override
100  {
101  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
102  }
103 
105  {
106  m_ImportFlags = importFlags;
107  }
108 
110  {
111  return m_ImportFlags;
112  }
113 
114  void SetImportEnabledFlag(bool importEnabledFlag)
115  {
116  m_IsImportEnabled = importEnabledFlag;
117  }
118 
119  bool CanBeImported(void* memory, MemorySource source) override
120  {
121  if (source != MemorySource::Malloc || reinterpret_cast<uintptr_t>(memory) % m_TypeAlignment)
122  {
123  return false;
124  }
125  return true;
126  }
127 
128  virtual bool Import(void* memory, MemorySource source) override
129  {
130  if (m_ImportFlags& static_cast<MemorySourceFlags>(source))
131  {
132  if (source == MemorySource::Malloc && m_IsImportEnabled)
133  {
134  if (!CanBeImported(memory, source))
135  {
136  throw MemoryImportException("NeonTensorHandle::Import Attempting to import unaligned memory");
137  }
138 
139  // m_Tensor not yet Allocated
140  if (!m_Imported && !m_Tensor.buffer())
141  {
142  arm_compute::Status status = m_Tensor.allocator()->import_memory(memory);
143  // Use the overloaded bool operator of Status to check if it worked, if not throw an exception
144  // with the Status error message
145  m_Imported = bool(status);
146  if (!m_Imported)
147  {
148  throw MemoryImportException(status.error_description());
149  }
150  return m_Imported;
151  }
152 
153  // m_Tensor.buffer() initially allocated with Allocate().
154  if (!m_Imported && m_Tensor.buffer())
155  {
156  throw MemoryImportException(
157  "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
158  }
159 
160  // m_Tensor.buffer() previously imported.
161  if (m_Imported)
162  {
163  arm_compute::Status status = m_Tensor.allocator()->import_memory(memory);
164  // Use the overloaded bool operator of Status to check if it worked, if not throw an exception
165  // with the Status error message
166  m_Imported = bool(status);
167  if (!m_Imported)
168  {
169  throw MemoryImportException(status.error_description());
170  }
171  return m_Imported;
172  }
173  }
174  else
175  {
176  throw MemoryImportException("NeonTensorHandle::Import is disabled");
177  }
178  }
179  else
180  {
181  throw MemoryImportException("NeonTensorHandle::Incorrect import flag");
182  }
183  return false;
184  }
185 
186  virtual std::shared_ptr<ITensorHandle> DecorateTensorHandle(const TensorInfo& tensorInfo) override;
187 
188 private:
189  // Only used for testing
190  void CopyOutTo(void* memory) const override
191  {
192  switch (this->GetDataType())
193  {
194  case arm_compute::DataType::F32:
195  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
196  static_cast<float*>(memory));
197  break;
198  case arm_compute::DataType::U8:
199  case arm_compute::DataType::QASYMM8:
200  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
201  static_cast<uint8_t*>(memory));
202  break;
203  case arm_compute::DataType::QSYMM8:
204  case arm_compute::DataType::QASYMM8_SIGNED:
205  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
206  static_cast<int8_t*>(memory));
207  break;
208  case arm_compute::DataType::BFLOAT16:
209  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
210  static_cast<armnn::BFloat16*>(memory));
211  break;
212  case arm_compute::DataType::F16:
213  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
214  static_cast<armnn::Half*>(memory));
215  break;
216  case arm_compute::DataType::S16:
217  case arm_compute::DataType::QSYMM16:
218  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
219  static_cast<int16_t*>(memory));
220  break;
221  case arm_compute::DataType::S32:
222  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
223  static_cast<int32_t*>(memory));
224  break;
225  default:
226  {
228  }
229  }
230  }
231 
232  // Only used for testing
233  void CopyInFrom(const void* memory) override
234  {
235  switch (this->GetDataType())
236  {
237  case arm_compute::DataType::F32:
238  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
239  this->GetTensor());
240  break;
241  case arm_compute::DataType::U8:
242  case arm_compute::DataType::QASYMM8:
243  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
244  this->GetTensor());
245  break;
246  case arm_compute::DataType::QSYMM8:
247  case arm_compute::DataType::QASYMM8_SIGNED:
248  case arm_compute::DataType::QSYMM8_PER_CHANNEL:
249  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
250  this->GetTensor());
251  break;
252  case arm_compute::DataType::BFLOAT16:
253  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::BFloat16*>(memory),
254  this->GetTensor());
255  break;
256  case arm_compute::DataType::F16:
257  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
258  this->GetTensor());
259  break;
260  case arm_compute::DataType::S16:
261  case arm_compute::DataType::QSYMM16:
262  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
263  this->GetTensor());
264  break;
265  case arm_compute::DataType::S32:
266  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
267  this->GetTensor());
268  break;
269  default:
270  {
272  }
273  }
274  }
275 
276  arm_compute::Tensor m_Tensor;
277  std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
278  MemorySourceFlags m_ImportFlags;
279  bool m_Imported;
280  bool m_IsImportEnabled;
281  const uintptr_t m_TypeAlignment;
282  std::vector<std::shared_ptr<NeonTensorHandleDecorator>> m_Decorated;
283 };
284 
286 {
287 public:
289  const arm_compute::TensorShape& shape,
290  const arm_compute::Coordinates& coords)
291  : m_Tensor(&parent->GetTensor(), shape, coords, true)
292  {
293  parentHandle = parent;
294  }
295 
296  arm_compute::ITensor& GetTensor() override { return m_Tensor; }
297  arm_compute::ITensor const& GetTensor() const override { return m_Tensor; }
298 
299  virtual void Allocate() override {}
300  virtual void Manage() override {}
301 
302  virtual ITensorHandle* GetParent() const override { return parentHandle; }
303 
304  virtual arm_compute::DataType GetDataType() const override
305  {
306  return m_Tensor.info()->data_type();
307  }
308 
309  virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>&) override {}
310 
311  virtual const void* Map(bool /* blocking = true */) const override
312  {
313  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
314  }
315  virtual void Unmap() const override {}
316 
317  TensorShape GetStrides() const override
318  {
319  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
320  }
321 
322  TensorShape GetShape() const override
323  {
324  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
325  }
326 
327  virtual std::shared_ptr<ITensorHandle> DecorateTensorHandle(const TensorInfo&) override
328  {
329  return nullptr;
330  };
331 
332 private:
333  // Only used for testing
334  void CopyOutTo(void* memory) const override
335  {
336  switch (this->GetDataType())
337  {
338  case arm_compute::DataType::F32:
339  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
340  static_cast<float*>(memory));
341  break;
342  case arm_compute::DataType::U8:
343  case arm_compute::DataType::QASYMM8:
344  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
345  static_cast<uint8_t*>(memory));
346  break;
347  case arm_compute::DataType::QSYMM8:
348  case arm_compute::DataType::QASYMM8_SIGNED:
349  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
350  static_cast<int8_t*>(memory));
351  break;
352  case arm_compute::DataType::S16:
353  case arm_compute::DataType::QSYMM16:
354  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
355  static_cast<int16_t*>(memory));
356  break;
357  case arm_compute::DataType::S32:
358  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
359  static_cast<int32_t*>(memory));
360  break;
361  default:
362  {
364  }
365  }
366  }
367 
368  // Only used for testing
369  void CopyInFrom(const void* memory) override
370  {
371  switch (this->GetDataType())
372  {
373  case arm_compute::DataType::F32:
374  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
375  this->GetTensor());
376  break;
377  case arm_compute::DataType::U8:
378  case arm_compute::DataType::QASYMM8:
379  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
380  this->GetTensor());
381  break;
382  case arm_compute::DataType::QSYMM8:
383  case arm_compute::DataType::QASYMM8_SIGNED:
384  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
385  this->GetTensor());
386  break;
387  case arm_compute::DataType::S16:
388  case arm_compute::DataType::QSYMM16:
389  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
390  this->GetTensor());
391  break;
392  case arm_compute::DataType::S32:
393  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
394  this->GetTensor());
395  break;
396  default:
397  {
399  }
400  }
401  }
402 
403  arm_compute::SubTensor m_Tensor;
404  ITensorHandle* parentHandle = nullptr;
405 };
406 
407 /// NeonTensorDecorator wraps an existing Neon tensor allowing us to override the TensorInfo for it
408 class NeonTensorDecorator : public arm_compute::ITensor
409 {
410 public:
412 
413  NeonTensorDecorator(arm_compute::ITensor* original, const TensorInfo& info);
414 
415  ~NeonTensorDecorator() = default;
416 
417  NeonTensorDecorator(const NeonTensorDecorator&) = delete;
418 
420 
422 
424 
425  // Inherited methods overridden:
426  arm_compute::ITensorInfo* info() const override;
427 
428  arm_compute::ITensorInfo* info() override;
429 
430  uint8_t* buffer() const override;
431 
432 private:
433  arm_compute::ITensor* m_Original;
434  mutable arm_compute::TensorInfo m_TensorInfo;
435 };
436 
438 {
439 public:
441  : m_Tensor(&parent->GetTensor(), info)
442  {
443  parentHandle = parent;
444  }
445 
446  arm_compute::ITensor& GetTensor() override { return m_Tensor; }
447  arm_compute::ITensor const& GetTensor() const override { return m_Tensor; }
448 
449  virtual void Allocate() override {}
450  virtual void Manage() override {}
451 
452  virtual ITensorHandle* GetParent() const override { return nullptr; }
453 
454  virtual arm_compute::DataType GetDataType() const override
455  {
456  return m_Tensor.info()->data_type();
457  }
458 
459  virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>&) override {}
460 
461  virtual const void* Map(bool /* blocking = true */) const override
462  {
463  return static_cast<const void*>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
464  }
465  virtual void Unmap() const override {}
466 
467  TensorShape GetStrides() const override
468  {
469  return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
470  }
471 
472  TensorShape GetShape() const override
473  {
474  return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
475  }
476 
477  virtual std::shared_ptr<ITensorHandle> DecorateTensorHandle(const TensorInfo&) override
478  {
479  return nullptr;
480  };
481 
482 private:
483  // Only used for testing
484  void CopyOutTo(void* memory) const override
485  {
486  switch (this->GetDataType())
487  {
488  case arm_compute::DataType::F32:
489  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
490  static_cast<float*>(memory));
491  break;
492  case arm_compute::DataType::U8:
493  case arm_compute::DataType::QASYMM8:
494  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
495  static_cast<uint8_t*>(memory));
496  break;
497  case arm_compute::DataType::QSYMM8:
498  case arm_compute::DataType::QASYMM8_SIGNED:
499  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
500  static_cast<int8_t*>(memory));
501  break;
502  case arm_compute::DataType::S16:
503  case arm_compute::DataType::QSYMM16:
504  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
505  static_cast<int16_t*>(memory));
506  break;
507  case arm_compute::DataType::S32:
508  armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
509  static_cast<int32_t*>(memory));
510  break;
511  default:
512  {
514  }
515  }
516  }
517 
518  // Only used for testing
519  void CopyInFrom(const void* memory) override
520  {
521  switch (this->GetDataType())
522  {
523  case arm_compute::DataType::F32:
524  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
525  this->GetTensor());
526  break;
527  case arm_compute::DataType::U8:
528  case arm_compute::DataType::QASYMM8:
529  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
530  this->GetTensor());
531  break;
532  case arm_compute::DataType::QSYMM8:
533  case arm_compute::DataType::QASYMM8_SIGNED:
534  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
535  this->GetTensor());
536  break;
537  case arm_compute::DataType::S16:
538  case arm_compute::DataType::QSYMM16:
539  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
540  this->GetTensor());
541  break;
542  case arm_compute::DataType::S32:
543  armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
544  this->GetTensor());
545  break;
546  default:
547  {
549  }
550  }
551  }
552 
553  NeonTensorDecorator m_Tensor;
554  ITensorHandle* parentHandle = nullptr;
555 };
556 
557 
558 } // namespace armnn
armnn::MemorySource::Malloc
@ Malloc
armnn::NeonTensorDecorator::~NeonTensorDecorator
~NeonTensorDecorator()=default
armnn::NeonSubTensorHandle::GetTensor
arm_compute::ITensor & GetTensor() override
Definition: NeonTensorHandle.hpp:296
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
TypesUtils.hpp
armnn::NeonSubTensorHandle::NeonSubTensorHandle
NeonSubTensorHandle(IAclTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)
Definition: NeonTensorHandle.hpp:288
armnn::NeonSubTensorHandle::Unmap
virtual void Unmap() const override
Unmap the tensor data.
Definition: NeonTensorHandle.hpp:315
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::NeonTensorHandle::NeonTensorHandle
NeonTensorHandle(const TensorInfo &tensorInfo)
Definition: NeonTensorHandle.hpp:31
armnn::NeonTensorHandle::Allocate
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
Definition: NeonTensorHandle.hpp:56
armnn::MemorySourceFlags
unsigned int MemorySourceFlags
Definition: MemorySources.hpp:15
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::NeonSubTensorHandle::DecorateTensorHandle
virtual std::shared_ptr< ITensorHandle > DecorateTensorHandle(const TensorInfo &) override
Returns a decorated version of this TensorHandle allowing us to override the TensorInfo for it.
Definition: NeonTensorHandle.hpp:327
armnn::Half
half_float::half Half
Definition: Half.hpp:22
armnn::NeonTensorHandle::GetImportFlags
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
Definition: NeonTensorHandle.hpp:109
armnn::NeonSubTensorHandle::Allocate
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
Definition: NeonTensorHandle.hpp:299
armnn::NeonSubTensorHandle::GetShape
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest ite...
Definition: NeonTensorHandle.hpp:322
armnn::Coordinates
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
Definition: InternalTypes.hpp:15
armnn::NeonTensorHandle::Unmap
virtual void Unmap() const override
Unmap the tensor data.
Definition: NeonTensorHandle.hpp:92
armnn::NeonSubTensorHandle::SetMemoryGroup
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
Definition: NeonTensorHandle.hpp:309
armnn::NeonTensorHandleDecorator::GetTensor
arm_compute::ITensor const & GetTensor() const override
Definition: NeonTensorHandle.hpp:447
armnn::NeonTensorDecorator::buffer
uint8_t * buffer() const override
Definition: NeonTensorHandle.cpp:42
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::NeonTensorHandle::GetShape
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest ite...
Definition: NeonTensorHandle.hpp:99
armnn::NeonTensorDecorator
NeonTensorDecorator wraps an existing Neon tensor allowing us to override the TensorInfo for it.
Definition: NeonTensorHandle.hpp:408
armnn::NeonSubTensorHandle::Manage
virtual void Manage() override
Indicate to the memory manager that this resource is active.
Definition: NeonTensorHandle.hpp:300
armnn::NeonSubTensorHandle::GetStrides
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the s...
Definition: NeonTensorHandle.hpp:317
armnn::NeonTensorHandleDecorator::GetTensor
arm_compute::ITensor & GetTensor() override
Definition: NeonTensorHandle.hpp:446
armnn::IAclTensorHandle
Definition: ArmComputeTensorHandle.hpp:16
PolymorphicDowncast.hpp
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::NeonTensorHandle::Manage
virtual void Manage() override
Indicate to the memory manager that this resource is active.
Definition: NeonTensorHandle.hpp:65
armnn::NeonTensorHandleDecorator::GetStrides
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the s...
Definition: NeonTensorHandle.hpp:467
armnn::NeonTensorHandle::CanBeImported
bool CanBeImported(void *memory, MemorySource source) override
Implementations must determine if this memory block can be imported.
Definition: NeonTensorHandle.hpp:119
armnn::NeonTensorHandle::NeonTensorHandle
NeonTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
Definition: NeonTensorHandle.hpp:40
armnn::NeonTensorHandle::Import
virtual bool Import(void *memory, MemorySource source) override
Import externally allocated memory.
Definition: NeonTensorHandle.hpp:128
armnn::GetDataTypeSize
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:182
armnn::NeonTensorHandle::GetDataType
virtual arm_compute::DataType GetDataType() const override
Definition: NeonTensorHandle.hpp:77
armnn::NeonSubTensorHandle::GetParent
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
Definition: NeonTensorHandle.hpp:302
armnn::BoostLogSeverityMapping::info
@ info
armnn::NeonTensorHandle::SetImportEnabledFlag
void SetImportEnabledFlag(bool importEnabledFlag)
Definition: NeonTensorHandle.hpp:114
ArmComputeTensorHandle.hpp
armnn::NeonTensorHandleDecorator::Map
virtual const void * Map(bool) const override
Map the tensor data for access.
Definition: NeonTensorHandle.hpp:461
armnn::NeonTensorHandleDecorator::Unmap
virtual void Unmap() const override
Unmap the tensor data.
Definition: NeonTensorHandle.hpp:465
Half.hpp
armnn::NeonTensorHandle::Map
virtual const void * Map(bool) const override
Map the tensor data for access.
Definition: NeonTensorHandle.hpp:87
armnn::NeonTensorHandleDecorator::SetMemoryGroup
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
Definition: NeonTensorHandle.hpp:459
armnn::NeonSubTensorHandle::GetDataType
virtual arm_compute::DataType GetDataType() const override
Definition: NeonTensorHandle.hpp:304
armnn::Status
Status
Definition: Types.hpp:42
armnn::NeonTensorHandleDecorator::GetDataType
virtual arm_compute::DataType GetDataType() const override
Definition: NeonTensorHandle.hpp:454
armnn::NeonSubTensorHandle
Definition: NeonTensorHandle.hpp:285
armnn::NeonTensorHandle::GetTensor
arm_compute::ITensor & GetTensor() override
Definition: NeonTensorHandle.hpp:53
armnn::MemoryImportException
Definition: Exceptions.hpp:125
armnn::BFloat16
Definition: BFloat16.hpp:15
armnn::MemorySource
MemorySource
Define the Memory Source to reduce copies.
Definition: Types.hpp:244
armnn::NeonTensorDecorator::operator=
NeonTensorDecorator & operator=(const NeonTensorDecorator &)=delete
armnn::NeonTensorHandleDecorator::Allocate
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
Definition: NeonTensorHandle.hpp:449
armnn::NeonTensorHandle::GetParent
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
Definition: NeonTensorHandle.hpp:75
armnn::NeonSubTensorHandle::GetTensor
arm_compute::ITensor const & GetTensor() const override
Definition: NeonTensorHandle.hpp:297
armnn::NeonTensorHandleDecorator
Definition: NeonTensorHandle.hpp:437
Exceptions.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::NeonTensorHandle::DecorateTensorHandle
virtual std::shared_ptr< ITensorHandle > DecorateTensorHandle(const TensorInfo &tensorInfo) override
Returns a decorated version of this TensorHandle allowing us to override the TensorInfo for it.
Definition: NeonTensorHandle.cpp:12
armnn::NeonTensorHandleDecorator::Manage
virtual void Manage() override
Indicate to the memory manager that this resource is active.
Definition: NeonTensorHandle.hpp:450
ArmComputeTensorUtils.hpp
armnn::NeonTensorHandleDecorator::GetShape
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest ite...
Definition: NeonTensorHandle.hpp:472
armnn::NeonTensorDecorator::info
arm_compute::ITensorInfo * info() const override
Definition: NeonTensorHandle.cpp:32
armnn::NeonTensorHandle::GetTensor
arm_compute::ITensor const & GetTensor() const override
Definition: NeonTensorHandle.hpp:54
BFloat16.hpp
armnn::UnimplementedException
Definition: Exceptions.hpp:98
armnn::NeonTensorHandleDecorator::GetParent
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
Definition: NeonTensorHandle.hpp:452
armnn::NeonTensorHandle::SetMemoryGroup
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
Definition: NeonTensorHandle.hpp:82
armnn::NeonTensorHandleDecorator::DecorateTensorHandle
virtual std::shared_ptr< ITensorHandle > DecorateTensorHandle(const TensorInfo &) override
Returns a decorated version of this TensorHandle allowing us to override the TensorInfo for it.
Definition: NeonTensorHandle.hpp:477
armnn::NeonTensorHandle::SetImportFlags
void SetImportFlags(MemorySourceFlags importFlags)
Definition: NeonTensorHandle.hpp:104
armnn::NeonTensorDecorator::NeonTensorDecorator
NeonTensorDecorator()
Definition: NeonTensorHandle.cpp:20
armnn::NeonSubTensorHandle::Map
virtual const void * Map(bool) const override
Map the tensor data for access.
Definition: NeonTensorHandle.hpp:311
armnn::NeonTensorHandle::GetStrides
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest where the smallest value is the s...
Definition: NeonTensorHandle.hpp:94
armnn::NeonTensorHandleDecorator::NeonTensorHandleDecorator
NeonTensorHandleDecorator(IAclTensorHandle *parent, const TensorInfo &info)
Definition: NeonTensorHandle.hpp:440
armnn::NeonTensorHandle
Definition: NeonTensorHandle.hpp:28
ARMNN_THROW_INVALIDARG_MSG_IF_FALSE
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
Definition: Exceptions.hpp:210