ArmNN
 25.11
Loading...
Searching...
No Matches
SubgraphUtils.hpp
Go to the documentation of this file.
1//
2// Copyright © 2022-2025 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
11#include <type_traits>
12
13namespace armnn
14{
15
16namespace
17{
18
19/// Checks if a Layer has a DataLayout that is either NCHW or NCDHW.
20class CheckForNCHW : public StrategyBase<NoThrowStrategy>
21{
22public:
23 CheckForNCHW()
24 {}
25
26 void ExecuteStrategy(const armnn::IConnectableLayer* layer,
27 const armnn::BaseDescriptor& descriptor,
28 const std::vector<armnn::ConstTensor>& constants,
29 const char* name,
30 const armnn::LayerBindingId id = 0) override
31 {
32 armnn::IgnoreUnused(layer, constants, id, name);
33 switch (layer->GetType())
34 {
36 {
37 auto desc = static_cast<const armnn::BatchMatMulDescriptor&>(descriptor);
38 m_Result = desc.m_DataLayoutX == DataLayout::NCHW || desc.m_DataLayoutY == DataLayout::NCHW;
39 break;
40 }
42 {
43 CheckDescForNCHW(static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor));
44 break;
45 }
47 {
48 CheckDescForNCHW(static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor));
49 break;
50 }
52 {
53 CheckDescForNCHW(static_cast<const armnn::Convolution2dDescriptor&>(descriptor));
54 break;
55 }
57 {
58 CheckDescForNCHW(static_cast<const armnn::Convolution3dDescriptor&>(descriptor));
59 break;
60 }
62 {
63 CheckDescForNCHW(static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor));
64 break;
65 }
67 {
68 CheckDescForNCHW(static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor));
69 break;
70 }
72 {
73 CheckDescForNCHW(static_cast<const armnn::L2NormalizationDescriptor&>(descriptor));
74 break;
75 }
77 {
78 CheckDescForNCHW(static_cast<const armnn::NormalizationDescriptor&>(descriptor));
79 break;
80 }
82 {
83 CheckDescForNCHW(static_cast<const armnn::Pooling2dDescriptor&>(descriptor));
84 break;
85 }
87 {
88 CheckDescForNCHW(static_cast<const armnn::Pooling3dDescriptor&>(descriptor));
89 break;
90 }
92 {
93 CheckDescForNCHW(static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor));
94 break;
95 }
97 {
98 CheckDescForNCHW(static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor));
99 break;
100 }
102 {
103 CheckDescForNCHW(static_cast<const armnn::StridedSliceDescriptor&>(descriptor));
104 break;
105 }
106 default:
107 {
108 m_Result = false;
109 }
110 }
111 }
112
113 /// Returns true if the Layer had a DataLayout and it was NCHW or NCDHW.
114 /// Returns false if the Layer either doesn't have a DataLayout or if it
115 /// had a DataLayout that was neither NCHW nor NCDHW.
116 bool Result()
117 {
118 return m_Result;
119 }
120
121private:
122 template<typename Descriptor>
123 void CheckDescForNCHW(const Descriptor& descriptor)
124 {
125 m_Result = (descriptor.m_DataLayout == DataLayout::NCHW) || (descriptor.m_DataLayout == DataLayout::NCDHW);
126 }
127
128 bool m_Result = false;
129};
130
131//
132// this helper only works if all layers where the inputs connect to are not selected
133//
134
135SubgraphView::IInputSlots CreateIInputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
136{
138 for (auto&& layer : layers)
139 {
140 for (unsigned int i = 0 ; i < layer->GetNumInputSlots(); ++i)
141 {
142 result.push_back(&(layer->GetInputSlot(i)));
143 }
144 }
145 return result;
146}
147
148//
149// this helper only works if all layers where the outputs connect to are not selected
150//
151
152SubgraphView::IOutputSlots CreateIOutputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
153{
155 for (auto &&layer: layers)
156 {
157 for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
158 {
159 result.push_back(&(layer->GetOutputSlot(i)));
160 }
161 }
162 return result;
163}
164
// Type used to hold the slot numbers to create the lists from. There should
// be a SlotList for each layer in the layers list
using SlotList = std::vector<int>;
168
169template<typename ILayerType>
170SubgraphView::IInputSlots CreateIInputsFromSlotLists(const std::vector<ILayerType*>& layers,
171 const std::vector<SlotList>& layersSlotLists)
172{
173 ARMNN_THROW_INVALIDARG_IF_FALSE(layersSlotLists.size() == layers.size());
174
176
177 for (unsigned int layerIdx = 0; layerIdx < layers.size(); ++layerIdx)
178 {
179 const SlotList& slotList = layersSlotLists[layerIdx];
180 for (unsigned int slotIdx = 0 ; slotIdx < layers[layerIdx]->GetNumInputSlots(); ++slotIdx)
181 {
182 if (std::find(slotList.begin(), slotList.end(), slotIdx) != slotList.end())
183 {
184 result.push_back(&(layers[layerIdx]->GetInputSlot(slotIdx)));
185 }
186 }
187 }
188 return result;
189}
190
191template<typename ILayerType>
192SubgraphView::IOutputSlots CreateIOutputsFromSlotLists(const std::vector<ILayerType*>& layers,
193 const std::vector<SlotList>& layersSlotLists)
194{
195 ARMNN_THROW_INVALIDARG_IF_FALSE(layersSlotLists.size() == layers.size());
196
198 for (unsigned int layerIdx = 0; layerIdx < layers.size(); ++layerIdx)
199 {
200 const SlotList& slotList = layersSlotLists[layerIdx];
201 for (unsigned int slotIdx = 0; slotIdx < layers[layerIdx]->GetNumOutputSlots(); ++slotIdx)
202 {
203 bool foundIt = std::find(slotList.begin(), slotList.end(), slotIdx) != slotList.end();
204 if (foundIt)
205 {
206 result.push_back(&(layers[layerIdx]->GetOutputSlot(slotIdx)));
207 }
208 }
209 }
210 return result;
211}
212}
213
215{
216 // namespace for holding template constraints related to fold pad functions
217 // and for static asserts to prevent function misuse
218 template <class>
219 inline constexpr bool alwaysFalse = false;
220
221 template <typename L, typename D>
222 struct IsValidPair : std::false_type {};
223
224 // template specialization of IsValidPair for allowed pairings of layers and descriptors
225 template <>
226 struct IsValidPair<Pooling2dLayer, Pooling2dDescriptor> : std::true_type {};
227
228 template <>
230
231 template <>
233
234} // namespace FoldPadConstraints
235
236inline bool IsNCHW(armnn::Layer& layer)
237{
238 CheckForNCHW check;
239 layer.ExecuteStrategy(check);
240 return check.Result();
241}
242
243inline void ReportUntouchedLayers(OptimizationViews& optimizationViews, std::map<LayerGuid, Layer*> untouched)
244{
245 std::vector<Layer*> untouchedVector;
246 for (const auto& pair : untouched)
247 {
248 Layer* layer = pair.second;
249 SubgraphView subgraphView({layer},
250 CreateIInputsFrom({layer}),
251 CreateIOutputsFrom({layer}));
252 optimizationViews.AddUntouchedSubgraph(std::move(subgraphView));
253 }
254}
255
256template<typename LayerType>
258 LayerType* baseLayer,
259 LayerType* replacementLayer)
260{
261 SubgraphView substitutionSubgraph({baseLayer},
262 CreateIInputsFrom({baseLayer}),
263 CreateIOutputsFrom({baseLayer}));
264
265 SubgraphView replacementSubgraph({replacementLayer},
266 CreateIInputsFrom({replacementLayer}),
267 CreateIOutputsFrom({replacementLayer}));
268
269
270 optimizationViews.AddSubstitution({substitutionSubgraph, replacementSubgraph});
271
272 return replacementLayer;
273}
274
275template<typename LayerType>
277 LayerType* baseLayer,
278 LayerType* replacementLayer,
279 PadLayer* padLayer)
280{
281 SubgraphView substitutionSubgraph({padLayer, baseLayer},
282 CreateIInputsFrom({padLayer}),
283 CreateIOutputsFrom({baseLayer}));
284 SubgraphView replacementSubgraph(replacementLayer);
285
286 optimizationViews.AddSubstitution({substitutionSubgraph, replacementSubgraph});
287
288 return replacementLayer;
289}
290
291/// Checks if the Layer is connected to any Layer that has an NCHW layout.
292inline bool ConnectedToLayerWithNCHW(Layer* baseLayer)
293{
294 Layer& parentLayer = baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
295
296 if (IsNCHW(parentLayer))
297 {
298 return true;
299 }
300 for (unsigned int i = 0; i < baseLayer->GetOutputSlot(0).GetNumConnections(); ++i)
301 {
302 Layer& nextLayer = baseLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer();
303 if (IsNCHW(nextLayer))
304 {
305 return true;
306 }
307 }
308 return false;
309}
310
311/// Checks the Layer's Connections to see if it's connected to a Layer with the provided layerType. If dimSize is
312/// provided will also check if the connecting Tensor has more than that number of dimensions
313inline bool ConnectedToLayerType(Layer* baseLayer, LayerType layerType, unsigned int dimSize = 0)
314{
315 Layer& parentLayer = baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
316 TensorInfo parentTensorInfo = baseLayer->GetInputSlot(0).GetTensorInfo();
317
318 if (parentTensorInfo.GetNumDimensions() > dimSize && parentLayer.GetType() == layerType)
319 {
320 return true;
321 }
322 for (unsigned int i = 0; i < baseLayer->GetOutputSlot(0).GetNumConnections(); ++i)
323 {
324 Layer& nextLayer = baseLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer();
325 TensorInfo nextTensorInfo = baseLayer->GetOutputSlot(0).GetConnection(i)->GetTensorInfo();
326
327 if (nextTensorInfo.GetNumDimensions() > dimSize && nextLayer.GetType() == layerType)
328 {
329 return true;
330 }
331 }
332 return false;
333}
334
/// Attempts to remove a Reshape layer by propagating its target shape onto the input
/// slots of every consumer and marking the Reshape as a deleted subgraph.
/// Bails out, leaving the graph untouched, when removal is currently unsupported.
/// Also erases the removed layer from the 'untouched' map so it is not double-reported.
inline void RemoveReshapeLayer(ReshapeLayer* baseLayer,
                               std::map<LayerGuid, Layer*>& untouched,
                               OptimizationViews& optimizationViews)
{
    // Nothing to do for a null layer.
    if (baseLayer == nullptr)
    {
        return;
    }
    // m_TargetShape is the shape this Reshape would have produced.
    ReshapeDescriptor reshapeDescriptor = baseLayer->GetParameters();
    Layer& parentLayer = baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();

    // Cannot currently remove the Reshape if it's connected to an Input, Constant or Splitter
    // NOTE(review): only Input and Constant are checked here although the comment also
    // mentions Splitter — confirm whether the Splitter case is handled elsewhere.
    if (parentLayer.GetType() == LayerType::Input || parentLayer.GetType() == LayerType::Constant)
    {
        return;
    }

    // Cannot currently remove the Reshape if it's connected to an OutputSlot or Concat
    // NOTE(review): only LayerType::Output is checked below — confirm Concat handling.
    for (unsigned int i = 0; i < baseLayer->GetOutputSlot(0).GetNumConnections(); ++i)
    {
        Layer& nextLayer = baseLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer();

        if (nextLayer.GetType() == LayerType::Output)
        {
            return;
        }
    }
    auto it = untouched.find(baseLayer->GetGuid());
    if (it == untouched.end())
    {
        // Already removed from map
        return;
    }
    untouched.erase(it);

    // Override the InputSlot TensorInfos for all the layers connected to the Reshape's OutputSlot
    for (unsigned int i = 0; i < baseLayer->GetOutputSlot(0).GetNumConnections(); ++i)
    {
        Layer& nextLayer = baseLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer();
        auto inputIndex = baseLayer->GetOutputSlot(0).GetConnection(i)->GetSlotIndex();
        // Copy the Reshape's output TensorInfo but force its shape to the reshape target,
        // then push it onto the consumer's input slot.
        TensorInfo reshapeInfo(baseLayer->GetOutputSlot(0).GetTensorInfo());
        reshapeInfo.SetShape(reshapeDescriptor.m_TargetShape);
        nextLayer.GetInputSlot(inputIndex).SetTensorInfo(reshapeInfo);
    }
    optimizationViews.AddDeletedSubgraph(baseLayer);
}
381
382
383template<typename LayerT, typename Descriptor>
384void FoldPadLayer2d(OptimizationViews& optimizationViews,
385 LayerT* baseLayer,
386 Descriptor& descriptor,
387 PadLayer* padLayer)
388{
389
390 // Enforce that the function is called with a valid combination of layertype and descriptors
392 "FoldPadLayer2d() called with an unsupported (LayerType, Descriptor) combination!");
393
394 IConnectableLayer* replacement = nullptr;
395 const std::string name = std::string("folded-") + padLayer->GetName() + "-into-" + baseLayer->GetName();
396 if constexpr (std::is_same_v<LayerT, Pooling2dLayer>)
397 {
398 replacement = optimizationViews.GetINetwork()->AddPooling2dLayer(descriptor, name.c_str());
399 LayerT* replacementLayer = PolymorphicDowncast<LayerT*>(replacement);
400 FoldPadLayer(optimizationViews,
401 baseLayer,
402 replacementLayer,
403 padLayer);
404 }
405 else if constexpr (std::is_same_v<LayerT, Convolution2dLayer> ||
406 std::is_same_v<LayerT, DepthwiseConvolution2dLayer>)
407 {
408 // DepthwiseConv2d and Conv2d pad fold is being done by creating a new layer and subsitituing
409 // the existing conv after updating the padding descriptor with TryFoldPadIntoLayer2d
410 // We then mark the pad layer for deletion
411 // this prevents a mismatch in the number of expected input slots on the optimized layer
412 // i.e. pad has 1 input slot but conv2d has 3 (1 input and 2 constants which show as input slots)
413 if constexpr (std::is_same_v<LayerT, Convolution2dLayer>)
414 {
415 replacement = optimizationViews.GetINetwork()->AddConvolution2dLayer(descriptor, name.c_str());
416 }
417 else
418 {
419 replacement = optimizationViews.GetINetwork()->AddDepthwiseConvolution2dLayer(descriptor, name.c_str());
420 }
421 LayerT* replacementLayer = PolymorphicDowncast<LayerT*>(replacement);
422 SubgraphView layerToDelete(padLayer);
423 optimizationViews.AddDeletedSubgraph(std::move(layerToDelete));
424 ReplaceLayer(optimizationViews,
425 baseLayer,
426 replacementLayer);
427 }
428 else
429 {
431 "FoldPadLayer2d() called with an unsupported LayerType");
432 }
433}
434
435//
436// Layer sequence detection such as add + mul + add ( + optional activation )
437//
438
439inline bool IsSequenceLayerType(Layer& layer, LayerType type)
440{
441 return layer.GetType() == type;
442}
443
445{
446 return (layer.GetType() == LayerType::ElementwiseBinary) &&
447 (PolymorphicDowncast<ElementwiseBinaryLayer*>(&layer)->GetParameters().m_Operation == type);
448}
449
450// Detect a layer sequence and activation if specified. The activation must be at the end of the sequence.
451template<typename TYPE>
452bool IsLayerSequence(Layer& currentLayer,
453 TYPE first,
454 TYPE second,
455 TYPE third,
456 Layer* layerList[4],
457 bool handleValidActivates,
458 const std::vector<ActivationFunction>& validActivates)
459{
460 auto PreviousLayer = [](Layer& layer)
461 {
462 return &layer.GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
463 };
464
465 auto NextLayer = [](Layer& layer)
466 {
467 return &layer.GetOutputSlot(0).GetConnection(0)->GetOwningLayer();
468 };
469
470 auto LayerIncomingConnectionDataType = [](Layer& layer)
471 {
472 return layer.GetInputSlot(0).GetTensorInfo().GetDataType();
473 };
474
475 bool result = false;
476
477 // Match in reverse so there is only 1 connection to check
478 if (IsSequenceLayerType(currentLayer, third))
479 {
480 // Save DataType of third layer
481 DataType dataType = LayerIncomingConnectionDataType(currentLayer);
482
483 // Save third layer
484 layerList[2] = &currentLayer;
485
486 // Check the layers that proceed this one for the requested grouping
487 Layer *prevLayer = PreviousLayer(currentLayer);
488 if (prevLayer && IsSequenceLayerType(*prevLayer, second))
489 {
490 bool dataTypesMatch = (dataType == LayerIncomingConnectionDataType(*prevLayer));
491 if (! dataTypesMatch)
492 {
493 return result;
494 }
495
496 layerList[1] = prevLayer;
497 prevLayer = PreviousLayer(*prevLayer);
498 if (prevLayer && IsSequenceLayerType(*prevLayer, first))
499 {
500 dataTypesMatch = (dataType == LayerIncomingConnectionDataType(*prevLayer));
501 if (! dataTypesMatch)
502 {
503 return result;
504 }
505
506 layerList[0] = prevLayer;
507
508 // Detected the first 3 layers if we get to this point so now
509 // check to see if we have a valid activation. If there is no activation
510 // then the sequence still matches.
511 if (handleValidActivates)
512 {
513 Layer *nextLayer = NextLayer(currentLayer);
514 if (nextLayer)
515 {
517 {
518 // This layer is an activation, so it must be a valid type for the sequence
519 ActivationFunction activationFunction =
520 PolymorphicDowncast<ActivationLayer*>(nextLayer)->GetParameters().m_Function;
521 long count = std::count(validActivates.cbegin(),
522 validActivates.cend(),
523 activationFunction);
524 if (count > 0)
525 {
526 layerList[3] = nextLayer;
527 result = true;
528 }
529 }
530 else
531 {
532 // Next layer is not an activation so sequence still matches
533 result = true;
534 }
535 }
536 }
537 else
538 {
539 result = true;
540 }
541 }
542 }
543 }
544
545 return result;
546}
547
// OpBlockSequencer reorders blocks based on the availability of their input tensors.
// If all of a block's input tensors are already known, the block is added to the list immediately;
// otherwise, it is queued until its inputs become available.
template<typename LayerT, typename BlockT>
class OpBlockSequencer
{
public:
    /// A layer together with the block it produced.
    struct Pair
    {
        LayerT* layer;
        BlockT* block;
    };

    OpBlockSequencer() = default;
    ~OpBlockSequencer() = default;

    /// Queue a (layer, block) pair. It is emitted immediately when all of the block's
    /// inputs are available; otherwise it is parked until its producers are emitted.
    void Add(LayerT* layer, BlockT* block)
    {
        if (HasInputs(block))
        {
            AddReady({layer, block});
            // Newly produced outputs may unblock previously parked blocks.
            ProcessPending();
        }
        else
        {
            m_Pending.emplace_back(Pair{layer,block});
        }
    }

    /// Flush remaining pending blocks and return the ordered list.
    /// @throws std::invalid_argument if some blocks could not be sequenced because
    ///         their inputs were never produced.
    /// NOTE(review): ProcessPending is a single forward pass, so a pending block whose
    /// producer sits later in the pending list may not be resolved here — confirm callers
    /// only Add() in an order where this cannot happen.
    std::list<Pair>& Finish()
    {
        ProcessPending();
        if (m_Pending.size())
        {
            std::stringstream stm;
            stm << "[OpBlockSequencer] " << m_Pending.size();
            stm << " blocks could not be processed!";
            throw std::invalid_argument(stm.str());
        }
        return m_Ready;
    }
private:
    /// True when every input tensor of the block is a graph input, a constant, or has
    /// already been produced by an emitted block. Inputs are classified by substring
    /// match on the tensor name ("input" / "constant").
    bool HasInputs(BlockT* block)
    {
        for (auto& inputTensorName : block->GetInputs())
        {
            // Graph inputs are always available.
            if (inputTensorName.find("input") != std::string::npos)
            {
                continue;
            }

            // Constants are always available.
            if (inputTensorName.find("constant") != std::string::npos)
            {
                continue;
            }

            if (m_TensorMap.find(inputTensorName) == m_TensorMap.end())
            {
                return false;
            }
        }
        return true;
    }

    /// Emit a pair and record its output tensors as now-available.
    void AddReady(Pair&& pair)
    {
        m_Ready.emplace_back(pair);
        for (auto & outputTensor : pair.block->GetOutputs())
        {
            m_TensorMap[outputTensor] = 1;
        }
    }

    /// Move every pending pair whose inputs have become available into the ready list.
    void ProcessPending()
    {
        auto itr = m_Pending.begin();
        while (itr != m_Pending.end())
        {
            if (HasInputs((*itr).block))
            {
                AddReady(std::move(*itr));
                itr = m_Pending.erase(itr);
            }
            else
            {
                ++itr;
            }
        }
    }
private:
    std::list<Pair> m_Ready;     // blocks in emission order
    std::list<Pair> m_Pending;   // blocks waiting for at least one input tensor
    std::unordered_map<std::string, uint32_t> m_TensorMap;  // tensors produced so far
};
642
643} // namespace armnn
#define ARMNN_THROW_INVALIDARG_IF_FALSE(_cond)
This layer represents a convolution 2d operation.
This layer represents a depthwise convolution 2d operation.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition INetwork.hpp:81
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
virtual const TensorInfo & GetTensorInfo() const =0
Gets the TensorInfo for this InputSlot.
IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D convolution layer to the network.
Definition Network.cpp:272
IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D depthwise convolution layer to the network.
Definition Network.cpp:292
IConnectableLayer * AddPooling2dLayer(const Pooling2dDescriptor &pooling2dDescriptor, const char *name=nullptr)
Adds a 2D pooling layer to the network.
Definition Network.cpp:356
virtual const IInputSlot * GetConnection(unsigned int index) const =0
void SetTensorInfo(const TensorInfo tensorInfo) override
Sets the TensorInfo for this InputSlot.
Definition Layer.cpp:609
Layer & GetOwningLayer() const
Definition Layer.hpp:53
const OutputSlot * GetConnectedOutputSlot() const
Definition Layer.hpp:56
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition Layer.cpp:614
unsigned int GetSlotIndex() const override
Definition Layer.hpp:54
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition Layer.hpp:337
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition Layer.cpp:571
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition Layer.hpp:339
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition Layer.hpp:343
const char * GetName() const override
Returns the name of the layer.
Definition Layer.hpp:332
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition Layer.hpp:286
const Parameters & GetParameters() const override
If the layer has a descriptor return it.
std::list< Pair > & Finish()
void Add(LayerT *layer, BlockT *block)
void AddUntouchedSubgraph(SubgraphView &&subgraph)
void AddDeletedSubgraph(SubgraphView &&subgraph)
void AddSubstitution(SubstitutionPair &&substitution)
const InputSlot * GetConnection(unsigned int index) const override
Definition Layer.cpp:83
Layer & GetOwningLayer() const
Definition Layer.hpp:132
const TensorInfo & GetTensorInfo() const override
Definition Layer.cpp:100
This layer represents a pad operation.
Definition PadLayer.hpp:15
This layer represents a pooling 2d operation.
This layer represents a reshape operation.
Strategy base class with empty implementations.
The SubgraphView class represents a subgraph of a Graph.
std::vector< IOutputSlot * > IOutputSlots
std::vector< IInputSlot * > IInputSlots
unsigned int GetNumDimensions() const
Definition Tensor.hpp:197
void SetShape(const TensorShape &newShape)
Definition Tensor.hpp:195
DataType GetDataType() const
Definition Tensor.hpp:200
Copyright (c) 2021 ARM Limited and Contributors.
bool IsSequenceLayerType(Layer &layer, LayerType type)
bool IsNCHW(armnn::Layer &layer)
bool ConnectedToLayerType(Layer *baseLayer, LayerType layerType, unsigned int dimSize=0)
Checks the Layer's Connections to see if it's connected to a Layer with the provided layerType.
bool IsLayerSequence(Layer &currentLayer, TYPE first, TYPE second, TYPE third, Layer *layerList[4], bool handleValidActivates, const std::vector< ActivationFunction > &validActivates)
ActivationFunction
Definition Types.hpp:87
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
LayerType * ReplaceLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, LayerType *replacementLayer)
void RemoveReshapeLayer(ReshapeLayer *baseLayer, std::map< LayerGuid, Layer * > &untouched, OptimizationViews &optimizationViews)
void ReportUntouchedLayers(OptimizationViews &optimizationViews, std::map< LayerGuid, Layer * > untouched)
void FoldPadLayer2d(OptimizationViews &optimizationViews, LayerT *baseLayer, Descriptor &descriptor, PadLayer *padLayer)
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
bool ConnectedToLayerWithNCHW(Layer *baseLayer)
Checks if the Layer is connected to any Layer that has an NCHW layout.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition Types.hpp:311
BinaryOperation
Definition Types.hpp:139
LayerType * FoldPadLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, LayerType *replacementLayer, PadLayer *padLayer)
DataType
Definition Types.hpp:49
void IgnoreUnused(Ts &&...)
A Convolution2dDescriptor for the Convolution2dLayer.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A Pooling2dDescriptor for the Pooling2dLayer.
A ReshapeDescriptor for the ReshapeLayer.
TensorShape m_TargetShape
Target shape value.