ArmNN
 25.11
Loading...
Searching...
No Matches
SplitterLayer.cpp
Go to the documentation of this file.
1//
2// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5#include "SplitterLayer.hpp"
6
7#include "LayerCloneBase.hpp"
8
13
14namespace armnn
15{
16
/// Constructor to create a SplitterLayer.
/// @param param ViewsDescriptor describing the number of views (= output slots)
///              and the origin/size of each view within the input tensor.
/// @param name  Optional layer name.
/// A splitter always has exactly 1 input slot and one output slot per view.
SplitterLayer::SplitterLayer(const ViewsDescriptor& param, const char* name)
    : LayerWithParameters(1, param.GetNumViews(), LayerType::Splitter, param, name)
{
}
21
22std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory& factory) const
23{
24 SplitterQueueDescriptor descriptor;
25
26 // Copies the window origins to the descriptor.
27 for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
28 {
29 descriptor.m_ViewOrigins.emplace_back(
30 std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
31 }
32
33 SetAdditionalInfo(descriptor);
34
35 return factory.CreateWorkload(LayerType::Splitter, descriptor, PrepInfoAndDesc(descriptor));
36}
37
38template<typename FactoryType>
39void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
40 const FactoryType& factory,
41 bool isMemoryManaged)
42{
43 //If sub tensors are supported than all the "splitter" need to do is to
44 //set the outputs to be appropriate sub tensors of the input.
45 bool useSubTensors = factory.SupportsSubTensors();
46
47 if (useSubTensors)
48 {
49 // Get outputHandler of previous layer
50 const OutputHandler& outputHandler = GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
51 const OutputSlot* slot = GetInputSlots()[0].GetConnectedOutputSlot();
52 const TensorInfo& parentInfo = GetInputSlot(0).GetTensorInfo();
53
54 ITensorHandle* inputData = outputHandler.GetData();
55
56 std::vector<std::unique_ptr<ITensorHandle>> subTensors;
57
58 // check if split is along the x or y (2 innermost dimensions)
59 auto numberOfDimensions = m_Param.GetNumDimensions();
60
61 std::set<unsigned int> axis = ComputeSplitAxis(m_Param, parentInfo.GetShape());
62 std::set<unsigned int>::iterator axisIt = axis.begin();
63
64 bool isOnXorY = m_Param.GetNumDimensions() >= 3 &&
65 ((*axisIt == numberOfDimensions - 1) ||
66 (*axisIt == numberOfDimensions - 2));
67
68 //Creates the outputs as subtensors of the input.
69 for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
70 {
71 const TensorInfo& info = m_OutputHandlers[i].GetTensorInfo();
72
73 OutputSlot& outSlot = GetOutputSlot(i);
75
76 const unsigned int numOutputSlots = GetNumOutputSlots();
77
78 // if split along x or y (2 innermost dimensions) and the next layers do not require padding
79 bool canUseSubTensorOnXorY = true;
80 bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
81 if (isTensorHandleFactory)
82 {
83 for (unsigned int it = 0; it < numOutputSlots; ++it)
84 {
85 InputSlot* inputSlot = GetOutputSlot(it).GetConnection(0);
86 ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
87 std::vector<Capability> capabilities =
88 handleFactory->GetCapabilities(&(inputSlot->GetOwningLayer()),
89 this,
91 if (isOnXorY)
92 {
93 canUseSubTensorOnXorY = false;
94 if (capabilities.empty())
95 {
96 canUseSubTensorOnXorY = true;
97 }
98 }
99
100 if (!canUseSubTensorOnXorY)
101 {
102 break;
103 }
104 }
105 }
106
107 auto CreateSubTensor = [&]()
108 {
109 // Make sure:
110 // 1) quantization parameters are in the same space
111 // 2) the same TensorHandleFactory is used for input and split layer output
112 // 3) the output does not go to a Constant layer or input layer
113 // 4) if split along x or y (2 innermost dimensions) and the next layers do not require padding
114 if (parentInfo.IsTypeSpaceMatch(info) && //(1)
115 factoryId == slot->GetTensorHandleFactoryId() && //(2)
116 GetOutputSlot(i).GetConnection(0)->GetOwningLayer().GetType() != LayerType::Constant && //(3)
117 GetOutputSlot(i).GetConnection(0)->GetOwningLayer().GetType() != LayerType::Input && //(3)
118 canUseSubTensorOnXorY) //(4)
119 {
121 return factory.CreateSubTensorHandle(*inputData,
122 info.GetShape(),
123 this->m_Param.GetViewOrigin(i));
125 }
126 return std::unique_ptr<ITensorHandle>();
127 };
128
129 auto subTensor = CreateSubTensor();
130 if (!subTensor)
131 {
132 useSubTensors = false;
133 break; //Failed to create a valid sub-tensor, so stop trying with the rest of the views.
134 }
135 subTensors.push_back(std::move(subTensor));
136 }
137
138 if (useSubTensors)
139 {
140 unsigned int i = 0;
141 for (auto& subTensor : subTensors)
142 {
143 m_OutputHandlers[i].SetData(std::move(subTensor));
144 ++i;
145 }
146 }
147 }
148
149 if (!useSubTensors)
150 {
151 for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
152 {
153 m_OutputHandlers[i].CreateTensorHandles(factory, isMemoryManaged);
154 }
155 }
156}
157
159 const IWorkloadFactory& workloadFactory,
160 const bool isMemoryManaged)
161{
162 OutputSlot& slot = GetOutputSlot(0);
164
166 {
167 CreateTensors(registry, workloadFactory, isMemoryManaged);
168 }
169 else
170 {
171 ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
172 if (!handleFactory)
173 {
174 throw armnn::NullPointerException("handleFactory is returning a nullptr.");
175 }
176 CreateTensors(registry, *handleFactory, isMemoryManaged);
177 }
178}
179
181{
182 return CloneBase<SplitterLayer>(graph, m_Param, GetName());
183}
184
185std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
186{
187 if (inputShapes.size() != m_Param.GetNumViews())
188 {
189 throw armnn::Exception("inputShapes' and m_NumViews' sizes do not match (\""
190 + std::to_string(inputShapes.size()) +
191 "\" vs \""
192 + std::to_string(m_Param.GetNumViews()) + "\")");
193 }
194
195 std::vector<TensorShape> outShapes;
196 //Output shapes must match View shapes.
197 for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
198 {
199 const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
200 outShapes.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
201 }
202 return outShapes;
203}
204
206{
207 std::for_each(BeginOutputSlots(), EndOutputSlots(), [&](OutputSlot& outputSlot)
208 {
210 });
211
212 std::vector<TensorShape> views;
213 for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
214 {
215 const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
216 views.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
217 }
218
219 auto inferredShapes = InferOutputShapes(views);
220
221 if (inferredShapes.size() != m_Param.GetNumViews())
222 {
223 throw armnn::LayerValidationException("inferredShapes' size and m_NumViews do not match (\""
224 + std::to_string(inferredShapes.size()) +
225 "\" vs \""
226 + std::to_string(m_Param.GetNumViews()) + "\")");
227 }
228
229 for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
230 {
232 inferredShapes[viewIdx],
234 "SplitterLayer",
235 viewIdx);
236 }
237}
238
240{
241 strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
242}
243
244} // namespace armnn
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_END
Base class for all ArmNN exceptions so that users can filter to just those.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
static const FactoryId LegacyFactoryId
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
Layer & GetOwningLayer() const
Definition Layer.hpp:53
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition Layer.cpp:614
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition Layer.hpp:335
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition Layer.hpp:337
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition Layer.cpp:526
const std::vector< InputSlot > & GetInputSlots() const
Definition Layer.hpp:258
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition Layer.hpp:339
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition Layer.hpp:266
LayerType * CloneBase(Graph &graph, Params &&... params) const
std::vector< OutputHandler > m_OutputHandlers
Definition Layer.hpp:440
const char * GetName() const override
Returns the name of the layer.
Definition Layer.hpp:332
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition Layer.hpp:286
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition Layer.cpp:457
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition Layer.cpp:303
std::vector< OutputSlot >::iterator EndOutputSlots()
Definition Layer.hpp:267
ShapeInferenceMethod m_ShapeInferenceMethod
Definition Layer.hpp:441
LayerWithParameters(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const ViewsDescriptor &param, const char *name)
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
const ViewsDescriptor & GetParameters() const override
ITensorHandle * GetData() const
Gets the allocated tensor memory.
const InputSlot * GetConnection(unsigned int index) const override
Definition Layer.cpp:83
const TensorInfo & GetTensorInfo() const override
Definition Layer.cpp:100
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition Layer.cpp:218
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs is equal to the number of outputs; for a splitter the output shapes are taken from the descriptor's view sizes.
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported, otherwise creates tensor handles.
SplitterLayer(const ViewsDescriptor &param, const char *name)
Constructor to create a SplitterLayer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of SplitterLayer.
SplitterLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Splitter type.
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
const TensorShape & GetShape() const
Definition Tensor.hpp:193
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition Tensor.cpp:432
Copyright (c) 2021 ARM Limited and Contributors.
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.
void Splitter(const SplitterQueueDescriptor &data, std::vector< ITensorHandle * > inputs, std::vector< ITensorHandle * > outputs)
Definition Splitter.hpp:17
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
std::vector< ViewOrigin > m_ViewOrigins
A ViewsDescriptor for the SplitterLayer.
uint32_t GetNumViews() const
Get the number of views.
uint32_t GetNumDimensions() const
Get the number of dimensions.