ArmNN
 24.08
SplitterLayer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
#include "SplitterLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
// Restored: the extractor dropped three includes; the file's cross-reference
// index names WorkloadData.hpp, WorkloadFactory.hpp and WorkloadUtils.hpp
// (ComputeSplitAxis, SplitterQueueDescriptor and IWorkloadFactory come from them).
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>
#include <armnn/backends/WorkloadUtils.hpp>
14 namespace armnn
15 {
16 
/// Constructor to create a SplitterLayer.
/// @param param ViewsDescriptor describing how the single input is split:
///              one output view (and therefore one output slot) per view.
/// @param name Optional layer name.
// A splitter always has exactly 1 input; the number of outputs equals the
// number of views in the descriptor.
SplitterLayer::SplitterLayer(const ViewsDescriptor& param, const char* name)
    : LayerWithParameters(1, param.GetNumViews(), LayerType::Splitter, param, name)
{
}
21 
22 std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory& factory) const
23 {
24  SplitterQueueDescriptor descriptor;
25 
26  // Copies the window origins to the descriptor.
27  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
28  {
29  descriptor.m_ViewOrigins.emplace_back(
30  std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions()));
31  }
32 
33  SetAdditionalInfo(descriptor);
34 
35  return factory.CreateWorkload(LayerType::Splitter, descriptor, PrepInfoAndDesc(descriptor));
36 }
37 
38 template<typename FactoryType>
39 void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
40  const FactoryType& factory,
41  bool isMemoryManaged)
42 {
43  //If sub tensors are supported than all the "splitter" need to do is to
44  //set the outputs to be appropriate sub tensors of the input.
45  bool useSubTensors = factory.SupportsSubTensors();
46 
47  if (useSubTensors)
48  {
49  // Get outputHandler of previous layer
50  const OutputHandler& outputHandler = GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
51  const OutputSlot* slot = GetInputSlots()[0].GetConnectedOutputSlot();
52  const TensorInfo& parentInfo = GetInputSlot(0).GetTensorInfo();
53 
54  ITensorHandle* inputData = outputHandler.GetData();
55 
56  std::vector<std::unique_ptr<ITensorHandle>> subTensors;
57 
58  // check if split is along the x or y (2 innermost dimensions)
59  auto numberOfDimensions = m_Param.GetNumDimensions();
60 
61  std::set<unsigned int> axis = ComputeSplitAxis(m_Param, parentInfo.GetShape());
62  std::set<unsigned int>::iterator axisIt = axis.begin();
63 
64  bool isOnXorY = m_Param.GetNumDimensions() >= 3 &&
65  ((*axisIt == numberOfDimensions - 1) ||
66  (*axisIt == numberOfDimensions - 2));
67 
68  //Creates the outputs as subtensors of the input.
69  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
70  {
71  const TensorInfo& info = m_OutputHandlers[i].GetTensorInfo();
72 
73  OutputSlot& outSlot = GetOutputSlot(i);
75 
76  const unsigned int numOutputSlots = GetNumOutputSlots();
77 
78  // if split along x or y (2 innermost dimensions) and the next layers do not require padding
79  bool canUseSubTensorOnXorY = true;
80  bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
81  if (isTensorHandleFactory)
82  {
83  for (unsigned int it = 0; it < numOutputSlots; ++it)
84  {
85  InputSlot* inputSlot = GetOutputSlot(it).GetConnection(0);
86  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
87  std::vector<Capability> capabilities =
88  handleFactory->GetCapabilities(&(inputSlot->GetOwningLayer()),
89  this,
91  if (isOnXorY)
92  {
93  canUseSubTensorOnXorY = false;
94  if (capabilities.empty())
95  {
96  canUseSubTensorOnXorY = true;
97  }
98  }
99 
100  if (!canUseSubTensorOnXorY)
101  {
102  break;
103  }
104  }
105  }
106 
107  auto CreateSubTensor = [&]()
108  {
109  // Make sure:
110  // 1) quantization parameters are in the same space
111  // 2) the same TensorHandleFactory is used for input and split layer output
112  // 3) the output does not go to a Constant layer or input layer
113  // 4) if split along x or y (2 innermost dimensions) and the next layers do not require padding
114  if (parentInfo.IsTypeSpaceMatch(info) && //(1)
115  factoryId == slot->GetTensorHandleFactoryId() && //(2)
118  canUseSubTensorOnXorY) //(4)
119  {
121  return factory.CreateSubTensorHandle(*inputData,
122  info.GetShape(),
123  this->m_Param.GetViewOrigin(i));
125  }
126  return std::unique_ptr<ITensorHandle>();
127  };
128 
129  auto subTensor = CreateSubTensor();
130  if (!subTensor)
131  {
132  useSubTensors = false;
133  break; //Failed to create a valid sub-tensor, so stop trying with the rest of the views.
134  }
135  subTensors.push_back(std::move(subTensor));
136  }
137 
138  if (useSubTensors)
139  {
140  unsigned int i = 0;
141  for (auto& subTensor : subTensors)
142  {
143  m_OutputHandlers[i].SetData(std::move(subTensor));
144  ++i;
145  }
146  }
147  }
148 
149  if (!useSubTensors)
150  {
151  for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
152  {
153  m_OutputHandlers[i].CreateTensorHandles(factory, isMemoryManaged);
154  }
155  }
156 }
157 
159  const IWorkloadFactory& workloadFactory,
160  const bool isMemoryManaged)
161 {
162  OutputSlot& slot = GetOutputSlot(0);
164 
165  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
166  {
167  CreateTensors(registry, workloadFactory, isMemoryManaged);
168  }
169  else
170  {
171  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
172  if (!handleFactory)
173  {
174  throw armnn::NullPointerException("handleFactory is returning a nullptr.");
175  }
176  CreateTensors(registry, *handleFactory, isMemoryManaged);
177  }
178 }
179 
181 {
182  return CloneBase<SplitterLayer>(graph, m_Param, GetName());
183 }
184 
185 std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
186 {
187  if (inputShapes.size() != m_Param.GetNumViews())
188  {
189  throw armnn::Exception("inputShapes' and m_NumViews' sizes do not match (\""
190  + std::to_string(inputShapes.size()) +
191  "\" vs \""
192  + std::to_string(m_Param.GetNumViews()) + "\")");
193  }
194 
195  std::vector<TensorShape> outShapes;
196  //Output shapes must match View shapes.
197  for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
198  {
199  const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
200  outShapes.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
201  }
202  return outShapes;
203 }
204 
206 {
207  std::for_each(BeginOutputSlots(), EndOutputSlots(), [&](OutputSlot& outputSlot)
208  {
210  });
211 
212  std::vector<TensorShape> views;
213  for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
214  {
215  const uint32_t* sizes = m_Param.GetViewSizes(viewIdx);
216  views.push_back(TensorShape(m_Param.GetNumDimensions(), sizes));
217  }
218 
219  auto inferredShapes = InferOutputShapes(views);
220 
221  if (inferredShapes.size() != m_Param.GetNumViews())
222  {
223  throw armnn::LayerValidationException("inferredShapes' size and m_NumViews do not match (\""
224  + std::to_string(inferredShapes.size()) +
225  "\" vs \""
226  + std::to_string(m_Param.GetNumViews()) + "\")");
227  }
228 
229  for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
230  {
231  ValidateAndCopyShape(GetOutputSlot(viewIdx).GetTensorInfo().GetShape(),
232  inferredShapes[viewIdx],
234  "SplitterLayer",
235  viewIdx);
236  }
237 }
238 
240 {
241  strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
242 }
243 
244 } // namespace armnn
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::SplitterLayer::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: SplitterLayer.cpp:239
armnn::LayerType::Splitter
@ Splitter
armnn::SplitterLayer
This layer represents a split operation.
Definition: SplitterLayer.hpp:13
armnn::InputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:53
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:100
armnn::Splitter
void Splitter(const SplitterQueueDescriptor &data, std::vector< ITensorHandle * > inputs, std::vector< ITensorHandle * > outputs)
Definition: Splitter.hpp:17
WorkloadData.hpp
WorkloadUtils.hpp
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::OutputSlot
Definition: Layer.hpp:100
TypesUtils.hpp
armnn::TensorHandleFactoryRegistry::GetFactory
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
Definition: TensorHandleFactoryRegistry.cpp:39
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::SplitterLayer::SplitterLayer
SplitterLayer(const ViewsDescriptor &param, const char *name)
Constructor to create a SplitterLayer.
Definition: SplitterLayer.cpp:17
armnn::SplitterLayer::ValidateTensorShapesFromInputs
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of SplitterLayer.
Definition: SplitterLayer.cpp:205
armnn::Layer::ValidateAndCopyShape
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:457
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::Layer::m_OutputHandlers
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:440
armnn::Layer::GetInputSlots
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:258
armnn::IStrategy
Definition: IStrategy.hpp:16
armnn::SplitterQueueDescriptor::m_ViewOrigins
std::vector< ViewOrigin > m_ViewOrigins
Definition: WorkloadData.hpp:124
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::LayerWithParameters< ViewsDescriptor >::GetParameters
const ViewsDescriptor & GetParameters() const override
Definition: LayerWithParameters.hpp:19
WorkloadFactory.hpp
armnn::OutputHandler::GetData
ITensorHandle * GetData() const
Gets the allocated tensor memory.
Definition: OutputHandler.hpp:46
armnn::LayerWithParameters
Definition: LayerWithParameters.hpp:14
armnn::SplitterLayer::CreateTensorHandles
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported otherwise cre...
Definition: SplitterLayer.cpp:158
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
armnn::ITensorHandleFactory::GetCapabilities
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
Definition: ITensorHandleFactory.hpp:93
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:614
armnn::ViewsDescriptor::GetViewSizes
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
Definition: Descriptors.cpp:347
armnn::SplitterQueueDescriptor
Definition: WorkloadData.hpp:111
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::SplitterLayer::Clone
SplitterLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
Definition: SplitterLayer.cpp:180
armnn::LayerWithParameters< ViewsDescriptor >::m_Param
ViewsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
Definition: LayerWithParameters.hpp:52
armnn::LayerWithParameters< ViewsDescriptor >::PrepInfoAndDesc
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
Definition: LayerWithParameters.hpp:44
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::Layer::GetNumOutputSlots
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:335
armnn::Layer::VerifyShapeInferenceType
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:526
armnn::GetTensorInfo
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Definition: RefWorkloadUtils.hpp:33
armnn::ITensorHandleFactory
Definition: ITensorHandleFactory.hpp:46
armnn::Layer::SetAdditionalInfo
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:303
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
armnn::OutputHandler
Definition: OutputHandler.hpp:28
armnn::BoostLogSeverityMapping::info
@ info
armnn::InputSlot
Definition: Layer.hpp:42
armnn::CapabilityClass::PaddingRequired
@ PaddingRequired
armnn::Layer::GetType
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:286
armnn::SplitterLayer::InferOutputShapes
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs,...
Definition: SplitterLayer.cpp:185
armnn::Layer::BeginOutputSlots
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:266
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:307
armnn::OutputSlot::GetTensorHandleFactoryId
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:218
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.
Definition: WorkloadUtils.cpp:377
armnn::ViewsDescriptor::GetNumViews
uint32_t GetNumViews() const
Get the number of views.
Definition: Descriptors.cpp:302
armnn::ITensorHandleFactory::FactoryId
std::string FactoryId
Definition: ITensorHandleFactory.hpp:49
armnn::LayerType::Input
@ Input
armnn::NullPointerException
Definition: Exceptions.hpp:146
armnn::Layer::m_ShapeInferenceMethod
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:441
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::OutputSlot::GetConnection
const InputSlot * GetConnection(unsigned int index) const override
Definition: Layer.cpp:83
armnn::Layer::EndOutputSlots
std::vector< OutputSlot >::iterator EndOutputSlots()
Definition: Layer.hpp:267
armnn::Graph
Definition: Graph.hpp:30
armnn::IWorkloadFactory::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
armnn::IStrategy::ExecuteStrategy
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
LayerCloneBase.hpp
SplitterLayer.hpp
armnn::LayerType::Constant
@ Constant
armnn::ViewsDescriptor::GetViewOrigin
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at the int value idx.
Definition: Descriptors.cpp:312
armnn::SplitterLayer::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Splitter type.
Definition: SplitterLayer.cpp:22