ArmNN 25.11
NeonTensorHandleFactory.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonTensorHandleFactory.hpp"
#include "NeonTensorHandle.hpp"

#include "Layer.hpp"

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

namespace armnn
{

using FactoryId = ITensorHandleFactory::FactoryId;

std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                              const TensorShape& subTensorShape,
                                                                              const unsigned int* subTensorOrigin)
                                                                              const
{
    const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    arm_compute::Coordinates coords;
    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); ++i)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());

    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<NeonSubTensorHandle>(
        PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
}

std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
{
    return NeonTensorHandleFactory::CreateTensorHandle(tensorInfo, true);
}

std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                           DataLayout dataLayout) const
{
    return NeonTensorHandleFactory::CreateTensorHandle(tensorInfo, dataLayout, true);
}

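// The overloads below take an IsMemoryManaged flag: a managed handle is placed in the factory's
// inter-layer memory group, while an unmanaged handle has memory import enabled instead.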
std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                           const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    // If we are not Managing the Memory then we must be importing
    tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
    tensorHandle->SetImportFlags(GetImportFlags());

    return tensorHandle;
}

std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                           DataLayout dataLayout,
                                                                           const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    // If we are not Managing the Memory then we must be importing
    tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
    tensorHandle->SetImportFlags(GetImportFlags());

    return tensorHandle;
}

const FactoryId& NeonTensorHandleFactory::GetIdStatic()
{
    static const FactoryId s_Id(NeonTensorHandleFactoryId());
    return s_Id;
}

const FactoryId& NeonTensorHandleFactory::GetId() const
{
    return GetIdStatic();
}

{
    return true;
}

{
    return false;
}

MemorySourceFlags NeonTensorHandleFactory::GetExportFlags() const
{
    return m_ExportFlags;
}

MemorySourceFlags NeonTensorHandleFactory::GetImportFlags() const
{
    return m_ImportFlags;
}

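// Only CapabilityClass::PaddingRequired is reported here: a capability is returned when the
// queried layer's type appears in the paddingRequiredLayers set.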
std::vector<Capability> NeonTensorHandleFactory::GetCapabilities(const IConnectableLayer* layer,
                                                                 const IConnectableLayer* connectedLayer,
                                                                 CapabilityClass capabilityClass)

{
    IgnoreUnused(connectedLayer);
    std::vector<Capability> capabilities;
    if (capabilityClass == CapabilityClass::PaddingRequired)
    {
        auto search = paddingRequiredLayers.find((PolymorphicDowncast<const Layer*>(layer))->GetType());
        if (search != paddingRequiredLayers.end())
        {
            Capability paddingCapability(CapabilityClass::PaddingRequired, true);
            capabilities.push_back(paddingCapability);
        }
    }
    return capabilities;
}

} // namespace armnn
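
A minimal usage sketch, not part of NeonTensorHandleFactory.cpp itself: it shows how the factory above could be driven to create a memory-managed handle and an importable one. The include path and the NeonMemoryManager constructor argument are assumptions about the backend-internal headers rather than a verified public API.

#include "NeonTensorHandleFactory.hpp" // assumed backend-internal include path
#include <armnn/Tensor.hpp>

#include <memory>

void ExampleUsage(std::shared_ptr<armnn::NeonMemoryManager> memoryManager)
{
    // Assumed: the factory is constructed from the backend's memory manager.
    armnn::NeonTensorHandleFactory factory(memoryManager);

    armnn::TensorInfo info({ 1, 3, 224, 224 }, armnn::DataType::Float32);

    // Memory-managed handle: its storage is pooled via the inter-layer memory group.
    std::unique_ptr<armnn::ITensorHandle> managed = factory.CreateTensorHandle(info, true);

    // Unmanaged handle: import of externally allocated memory is enabled instead.
    std::unique_ptr<armnn::ITensorHandle> importable = factory.CreateTensorHandle(info, false);
}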