// NOTE(review): extraction residue — the leading numbers (16..45) are original source
// line numbers fused into the text by the scrape; they are not code.
// Lines 16-21: Arm Compute headers used by this tensor-handle implementation.
// Lines 32-34: tail of NeonTensorHandle(const TensorInfo&) — initialises
// m_IsImportEnabled to false and builds the backing tensor from the TensorInfo.
16 #include <arm_compute/runtime/MemoryGroup.h> 17 #include <arm_compute/runtime/IMemoryGroup.h> 18 #include <arm_compute/runtime/Tensor.h> 19 #include <arm_compute/runtime/SubTensor.h> 20 #include <arm_compute/core/TensorShape.h> 21 #include <arm_compute/core/Coordinates.h> 32 m_IsImportEnabled(
false)
34 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo);
// Lines 40-45: tail of NeonTensorHandle(const TensorInfo&, DataLayout, MemorySourceFlags)
// — records the supported import sources, disables importing by default, and builds
// the tensor honouring the requested data layout.
40 : m_ImportFlags(importFlags),
42 m_IsImportEnabled(
false)
45 armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo, dataLayout);
48 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
49 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
// Allocate(): initialise the backing tensor's memory — skipped when importing is
// enabled, because the buffer is then expected to arrive via Import() instead.
// (Fragmentary: the function signature and braces are missing from this extraction.)
54 if (!m_IsImportEnabled)
56 armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);
// Manage(): hand the tensor to the memory group for pooled lifetime management.
// Skipped when importing is enabled — imported buffers are owned externally.
// (Fragmentary: the function signature and braces are missing from this extraction.)
63 if (!m_IsImportEnabled)
66 m_MemoryGroup->manage(&m_Tensor);
74 return m_Tensor.info()->data_type();
77 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup)
override 79 m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
82 virtual const void*
Map(
bool )
const override 84 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
87 virtual void Unmap()
const override {}
91 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
96 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
101 m_ImportFlags = importFlags;
106 return m_ImportFlags;
111 m_IsImportEnabled = importEnabledFlag;
// Import(memory, source): adopt an externally allocated buffer as the tensor's
// backing memory. (Fragmentary: several original lines — status construction,
// error throws, braces — are missing from this extraction.)
// Only proceed when `source` is among the supported import flags.
116 if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
// Reject pointers not aligned to at least sizeof(size_t) bytes.
121 constexpr uintptr_t alignment =
sizeof(size_t);
122 if (reinterpret_cast<uintptr_t>(memory) % alignment)
// First-time import path: no buffer has been allocated or imported yet.
128 if (!m_Imported && !m_Tensor.buffer())
133 m_Imported = bool(status);
// Importing onto a tensor that already owns an allocated buffer is an error.
142 if (!m_Imported && m_Tensor.buffer())
145 "NeonTensorHandle::Import Attempting to import on an already allocated tensor");
154 m_Imported = bool(status);
// CopyOutTo(memory): copy the tensor's contents into caller-provided host memory,
// dispatching on the Arm Compute data type to select the correct element type.
// (Fragmentary: the switch header, `break` statements and default case are
// missing from this extraction.)
176 void CopyOutTo(
void* memory)
const override 180 case arm_compute::DataType::F32:
181 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
182 static_cast<float*>(memory));
184 case arm_compute::DataType::U8:
185 case arm_compute::DataType::QASYMM8:
186 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
187 static_cast<uint8_t*>(memory));
189 case arm_compute::DataType::QSYMM8:
190 case arm_compute::DataType::QASYMM8_SIGNED:
191 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
192 static_cast<int8_t*>(memory));
194 case arm_compute::DataType::BFLOAT16:
195 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
196 static_cast<armnn::BFloat16*>(memory));
198 case arm_compute::DataType::F16:
199 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
200 static_cast<armnn::Half*>(memory));
202 case arm_compute::DataType::S16:
203 case arm_compute::DataType::QSYMM16:
204 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
205 static_cast<int16_t*>(memory));
207 case arm_compute::DataType::S32:
208 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
209 static_cast<int32_t*>(memory));
// CopyInFrom(memory): fill the tensor from caller-provided host memory, the
// mirror of CopyOutTo. Each case casts the source pointer to the element type
// matching the tensor's Arm Compute data type. (Fragmentary: switch header,
// destination-tensor arguments, `break` statements and default case are missing.)
219 void CopyInFrom(
const void* memory)
override 223 case arm_compute::DataType::F32:
224 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
227 case arm_compute::DataType::U8:
228 case arm_compute::DataType::QASYMM8:
229 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
232 case arm_compute::DataType::QSYMM8:
233 case arm_compute::DataType::QASYMM8_SIGNED:
234 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
237 case arm_compute::DataType::BFLOAT16:
238 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::BFloat16*>(memory),
241 case arm_compute::DataType::F16:
242 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
245 case arm_compute::DataType::S16:
246 case arm_compute::DataType::QSYMM16:
247 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
250 case arm_compute::DataType::S32:
251 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
// The Arm Compute tensor owned by this handle.
261 arm_compute::Tensor m_Tensor;
// Memory group managing this tensor's allocation (set via SetMemoryGroup).
262 std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
// When true, Allocate()/Manage() are skipped and memory comes from Import().
265 bool m_IsImportEnabled;
// NeonSubTensorHandle constructor (fragmentary): wraps an arm_compute::SubTensor
// over the region of the parent tensor described by `shape` and `coords`, and
// remembers the parent handle so GetParent() can return it.
272 const arm_compute::TensorShape& shape,
274 : m_Tensor(&parent->
GetTensor(), shape, coords)
276 parentHandle = parent;
279 arm_compute::ITensor&
GetTensor()
override {
return m_Tensor; }
280 arm_compute::ITensor
const&
GetTensor()
const override {
return m_Tensor; }
289 return m_Tensor.info()->data_type();
292 virtual void SetMemoryGroup(
const std::shared_ptr<arm_compute::IMemoryGroup>&)
override {}
294 virtual const void*
Map(
bool )
const override 296 return static_cast<const void*
>(m_Tensor.buffer() + m_Tensor.info()->offset_first_element_in_bytes());
298 virtual void Unmap()
const override {}
302 return armcomputetensorutils::GetStrides(m_Tensor.info()->strides_in_bytes());
307 return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
// CopyOutTo(memory): copy the sub-tensor's contents into caller-provided host
// memory, dispatching on the data type. Note: unlike the parent-handle version
// above in the original file, no BFLOAT16/F16 cases are visible here — TODO
// confirm against the full source whether that is intentional.
// (Fragmentary: switch header, `break` statements and default case are missing.)
312 void CopyOutTo(
void* memory)
const override 316 case arm_compute::DataType::F32:
317 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
318 static_cast<float*>(memory));
320 case arm_compute::DataType::U8:
321 case arm_compute::DataType::QASYMM8:
322 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
323 static_cast<uint8_t*>(memory));
325 case arm_compute::DataType::QSYMM8:
326 case arm_compute::DataType::QASYMM8_SIGNED:
327 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
328 static_cast<int8_t*>(memory));
330 case arm_compute::DataType::S16:
331 case arm_compute::DataType::QSYMM16:
332 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
333 static_cast<int16_t*>(memory));
335 case arm_compute::DataType::S32:
336 armcomputetensorutils::CopyArmComputeITensorData(this->
GetTensor(),
337 static_cast<int32_t*>(memory));
// CopyInFrom(memory): fill the sub-tensor from caller-provided host memory,
// the mirror of CopyOutTo above. (Fragmentary: switch header, destination
// arguments, `break` statements and default case are missing.)
347 void CopyInFrom(
const void* memory)
override 351 case arm_compute::DataType::F32:
352 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
355 case arm_compute::DataType::U8:
356 case arm_compute::DataType::QASYMM8:
357 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
360 case arm_compute::DataType::QSYMM8:
361 case arm_compute::DataType::QASYMM8_SIGNED:
362 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
365 case arm_compute::DataType::S16:
366 case arm_compute::DataType::QSYMM16:
367 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
370 case arm_compute::DataType::S32:
371 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
381 arm_compute::SubTensor m_Tensor;
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest, where the smallest value is the same as the size of a single element in the tensor.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &) override
virtual arm_compute::DataType GetDataType() const override
virtual void SetMemoryGroup(const std::shared_ptr< arm_compute::IMemoryGroup > &memoryGroup) override
virtual arm_compute::DataType GetDataType() const override
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
virtual void Manage() override
Indicate to the memory manager that this resource is active.
virtual void Unmap() const override
Unmap the tensor data.
arm_compute::ITensor const & GetTensor() const override
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
unsigned int MemorySourceFlags
arm_compute::ITensor const & GetTensor() const override
virtual void Unmap() const override
Unmap the tensor data.
Copyright (c) 2021 ARM Limited and Contributors.
virtual bool Import(void *memory, MemorySource source) override
Import externally allocated memory.
TensorShape GetShape() const override
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
NeonSubTensorHandle(IAclTensorHandle *parent, const arm_compute::TensorShape &shape, const arm_compute::Coordinates &coords)
#define ARMNN_ASSERT(COND)
arm_compute::ITensor & GetTensor() override
MemorySourceFlags GetImportFlags() const override
Get flags describing supported import sources.
TensorShape GetStrides() const override
Get the strides for each dimension ordered from largest to smallest, where the smallest value is the same as the size of a single element in the tensor.
virtual const void * Map(bool) const override
Map the tensor data for access.
NeonTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, MemorySourceFlags importFlags=static_cast< MemorySourceFlags >(MemorySource::Malloc))
virtual ITensorHandle * GetParent() const override
Get the parent tensor if this is a subtensor.
MemorySource
Define the Memory Source to reduce copies.
virtual void Allocate() override
Indicate to the memory manager that this resource is no longer active.
NeonTensorHandle(const TensorInfo &tensorInfo)
virtual const void * Map(bool) const override
Map the tensor data for access.
void SetImportFlags(MemorySourceFlags importFlags)
arm_compute::ITensor & GetTensor() override
void SetImportEnabledFlag(bool importEnabledFlag)
virtual void Manage() override
Indicate to the memory manager that this resource is active.