ArmNN 25.11
RefBackend.cpp
//
// Copyright © 2022-2025 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefBackend.hpp"
#include "RefBackendId.hpp"
#include "RefWorkloadFactory.hpp"
#include "RefLayerSupport.hpp"
#include "RefTensorHandleFactory.hpp"

// NOTE: the remaining includes are reconstructed from the symbols used below; the
// exact set in the original file may differ.
#include <armnn/backends/IBackendContext.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/DefaultAllocator.hpp>
#include <backendsCommon/SubgraphUtils.hpp>

namespace armnn
{

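// Unique identifier of the reference backend.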
const BackendId& RefBackend::GetIdStatic()
{
    static const BackendId s_Id{RefBackendId()};
    return s_Id;
}

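// Creates the reference workload factory, sharing the caller-supplied memory manager
// (downcast to RefMemoryManager) with the factory.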
IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory(
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
{
    return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager));
}

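// Overload used with a TensorHandleFactoryRegistry: creates a fresh RefMemoryManager,
// registers it together with a RefTensorHandleFactory, and returns a workload factory
// that shares the same memory manager.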
IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory(
    class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
{
    auto memoryManager = std::make_shared<RefMemoryManager>();

    tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);

    std::unique_ptr<RefTensorHandleFactory> factory = std::make_unique<RefTensorHandleFactory>(memoryManager);
    // Register copy and import factory pair
    tensorHandleFactoryRegistry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
    // Register the factory
    tensorHandleFactoryRegistry.RegisterFactory(std::move(factory));

    return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager));
}

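// Neither a runtime context nor a profiling context is needed by the reference backend.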
// (Definitions reconstructed from the declared signatures: both return empty pointers.)
IBackendInternal::IBackendContextPtr RefBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
{
    return IBackendContextPtr{};
}

IBackendInternal::IBackendProfilingContextPtr RefBackend::CreateBackendProfilingContext(
    const IRuntime::CreationOptions&, IBackendProfilingPtr&)
{
    return IBackendProfilingContextPtr{};
}

IBackendInternal::IMemoryManagerUniquePtr RefBackend::CreateMemoryManager() const
{
    return std::make_unique<RefMemoryManager>();
}

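// Layer support queries are answered by a single shared RefLayerSupport instance.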
IBackendInternal::ILayerSupportSharedPtr RefBackend::GetLayerSupport() const
{
    static ILayerSupportSharedPtr layerSupport{new RefLayerSupport};
    return layerSupport;
}

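// Backend-specific graph optimizations: fold a preceding Pad layer into Pooling2d,
// Convolution2d and DepthwiseConvolution2d where possible, and remove redundant
// Reshape layers. Layers that are not modified are reported back as untouched.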
OptimizationViews RefBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
                                                   const ModelOptions& modelOptions) const
{
    OptimizationViews optimizationViews(modelOptions);

    auto it = subgraph.end();
    std::map<LayerGuid, Layer*> untouched;

    while (it != subgraph.begin())
    {
        --it;
        Layer& base = *(PolymorphicDowncast<Layer*>(*it));
        untouched.insert({base.GetGuid(), &base});
    }

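    // Walk the subgraph a second time, applying the fold-pad and reshape-removal optimizations.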
    it = subgraph.end();
    while (it != subgraph.begin())
    {
        --it;
        Layer& base = *(PolymorphicDowncast<Layer*>(*it));

        // Special case to fuse padding into average pooling 2d for quantized datatype.
        // Required to be done as a backend specific optimization as Neon does not support this special case.
        if (base.GetType() == LayerType::Pooling2d)
        {
            Pooling2dLayer* baseLayer = PolymorphicDowncast<Pooling2dLayer*>(&base);
            Pooling2dDescriptor poolingDescriptor = baseLayer->GetParameters();

            if (baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer().GetType() == LayerType::Pad)
            {
                PadLayer* padLayer = PolymorphicDowncast<PadLayer*>(
                    &baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer());
                if (padLayer->GetOutputSlot(0).GetNumConnections() == 1 &&
                    optimizations::pad_fold::TryFoldPadIntoLayer2d(padLayer->GetParameters(),
                                                                   poolingDescriptor,
                                                                   padLayer->GetOutputSlot().GetTensorInfo(),
                                                                   true))
                {
                    FoldPadLayer2d<Pooling2dLayer, Pooling2dDescriptor>(optimizationViews, baseLayer,
                                                                        poolingDescriptor, padLayer);
                    untouched.erase(baseLayer->GetGuid());
                    untouched.erase(padLayer->GetGuid());
                }
            }
        }

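        // The same pad-folding optimization, applied to Convolution2d.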
        if (base.GetType() == LayerType::Convolution2d)
        {
            Convolution2dLayer* baseLayer = PolymorphicDowncast<Convolution2dLayer*>(&base);
            Convolution2dDescriptor convDescriptor = baseLayer->GetParameters();
            if (baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer().GetType() == LayerType::Pad)
            {
                // perform fold pad into conv2d if possible
                PadLayer* padLayer = PolymorphicDowncast<PadLayer*>(
                    &baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer());
                if (padLayer->GetOutputSlot(0).GetNumConnections() == 1 &&
                    optimizations::pad_fold::TryFoldPadIntoLayer2d(padLayer->GetParameters(),
                                                                   convDescriptor,
                                                                   padLayer->GetOutputSlot().GetTensorInfo()))
                {
                    FoldPadLayer2d<Convolution2dLayer, Convolution2dDescriptor>(optimizationViews, baseLayer,
                                                                                convDescriptor, padLayer);

                    untouched.erase(baseLayer->GetGuid());
                    untouched.erase(padLayer->GetGuid());
                }
            }
        }
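        // The same pad-folding optimization, applied to DepthwiseConvolution2d.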
        if (base.GetType() == LayerType::DepthwiseConvolution2d)
        {
            DepthwiseConvolution2dLayer* baseLayer = PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&base);
            DepthwiseConvolution2dDescriptor convDescriptor = baseLayer->GetParameters();
            if (baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer().GetType() == LayerType::Pad)
            {
                // perform fold pad into depthwiseconv2d if possible
                PadLayer* padLayer = PolymorphicDowncast<PadLayer*>(
                    &baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer());
                if (padLayer->GetOutputSlot(0).GetNumConnections() == 1 &&
                    optimizations::pad_fold::TryFoldPadIntoLayer2d(padLayer->GetParameters(),
                                                                   convDescriptor,
                                                                   padLayer->GetOutputSlot().GetTensorInfo()))
                {
                    FoldPadLayer2d<DepthwiseConvolution2dLayer, DepthwiseConvolution2dDescriptor>(optimizationViews,
                                                                                                  baseLayer,
                                                                                                  convDescriptor,
                                                                                                  padLayer);

                    untouched.erase(baseLayer->GetGuid());
                    untouched.erase(padLayer->GetGuid());
                }
            }
        }

        // Remove Reshape where possible
        if (base.GetType() == LayerType::Reshape)
        {
            ReshapeLayer* baseLayer = PolymorphicDowncast<ReshapeLayer*>(&base);
            RemoveReshapeLayer(baseLayer, untouched, optimizationViews);
        }
    }

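    // If no substitutions or deletions were made, hand the whole subgraph back untouched;
    // otherwise report the layers that were left unmodified.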
    if (optimizationViews.GetSubstitutions().empty() && optimizationViews.GetDeletedSubgraphs().empty())
    {
        optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
    }
    else
    {
        ReportUntouchedLayers(optimizationViews, untouched);
    }

    return optimizationViews;
}

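// The reference backend offers a single tensor handle factory.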
std::vector<ITensorHandleFactory::FactoryId> RefBackend::GetHandleFactoryPreferences() const
{
    return std::vector<ITensorHandleFactory::FactoryId> { RefTensorHandleFactory::GetIdStatic() };
}

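// Registers the RefMemoryManager and RefTensorHandleFactory with the registry; the same
// factory id serves as both the copy and the import side of the pair.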
void RefBackend::RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry)
{
    auto memoryManager = std::make_shared<RefMemoryManager>();

    registry.RegisterMemoryManager(memoryManager);

    std::unique_ptr<RefTensorHandleFactory> factory = std::make_unique<RefTensorHandleFactory>(memoryManager);

    // Register copy and import factory pair
    registry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
    // Register the factory
    registry.RegisterFactory(std::move(factory));
}

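// Default allocator used when the application does not supply a custom one.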
std::unique_ptr<ICustomAllocator> RefBackend::GetDefaultAllocator() const
{
    return std::make_unique<DefaultAllocator>();
}

} // namespace armnn