26 void ExecuteStrategy(
const armnn::IConnectableLayer* layer,
27 const armnn::BaseDescriptor& descriptor,
28 const std::vector<armnn::ConstTensor>& constants,
37 auto desc =
static_cast<const armnn::BatchMatMulDescriptor&
>(descriptor);
43 CheckDescForNCHW(
static_cast<const armnn::BatchNormalizationDescriptor&
>(descriptor));
48 CheckDescForNCHW(
static_cast<const armnn::BatchToSpaceNdDescriptor&
>(descriptor));
53 CheckDescForNCHW(
static_cast<const armnn::Convolution2dDescriptor&
>(descriptor));
58 CheckDescForNCHW(
static_cast<const armnn::Convolution3dDescriptor&
>(descriptor));
63 CheckDescForNCHW(
static_cast<const armnn::DepthwiseConvolution2dDescriptor&
>(descriptor));
68 CheckDescForNCHW(
static_cast<const armnn::InstanceNormalizationDescriptor&
>(descriptor));
73 CheckDescForNCHW(
static_cast<const armnn::L2NormalizationDescriptor&
>(descriptor));
78 CheckDescForNCHW(
static_cast<const armnn::NormalizationDescriptor&
>(descriptor));
83 CheckDescForNCHW(
static_cast<const armnn::Pooling2dDescriptor&
>(descriptor));
88 CheckDescForNCHW(
static_cast<const armnn::Pooling3dDescriptor&
>(descriptor));
93 CheckDescForNCHW(
static_cast<const armnn::SpaceToBatchNdDescriptor&
>(descriptor));
98 CheckDescForNCHW(
static_cast<const armnn::SpaceToDepthDescriptor&
>(descriptor));
103 CheckDescForNCHW(
static_cast<const armnn::StridedSliceDescriptor&
>(descriptor));
122 template<
typename Descriptor>
123 void CheckDescForNCHW(
const Descriptor& descriptor)
128 bool m_Result =
false;
138 for (
auto&& layer : layers)
155 for (
auto &&layer: layers)
167typedef std::vector<int> SlotList;
169template<
typename ILayerType>
171 const std::vector<SlotList>& layersSlotLists)
177 for (
unsigned int layerIdx = 0; layerIdx < layers.size(); ++layerIdx)
179 const SlotList& slotList = layersSlotLists[layerIdx];
180 for (
unsigned int slotIdx = 0 ; slotIdx < layers[layerIdx]->GetNumInputSlots(); ++slotIdx)
182 if (std::find(slotList.begin(), slotList.end(), slotIdx) != slotList.end())
184 result.push_back(&(layers[layerIdx]->GetInputSlot(slotIdx)));
191template<
typename ILayerType>
193 const std::vector<SlotList>& layersSlotLists)
198 for (
unsigned int layerIdx = 0; layerIdx < layers.size(); ++layerIdx)
200 const SlotList& slotList = layersSlotLists[layerIdx];
201 for (
unsigned int slotIdx = 0; slotIdx < layers[layerIdx]->GetNumOutputSlots(); ++slotIdx)
203 bool foundIt = std::find(slotList.begin(), slotList.end(), slotIdx) != slotList.end();
206 result.push_back(&(layers[layerIdx]->GetOutputSlot(slotIdx)));
221 template <
typename L,
typename D>
240 return check.Result();
245 std::vector<Layer*> untouchedVector;
246 for (
const auto& pair : untouched)
248 Layer* layer = pair.second;
250 CreateIInputsFrom({layer}),
251 CreateIOutputsFrom({layer}));
256template<
typename LayerType>
262 CreateIInputsFrom({baseLayer}),
263 CreateIOutputsFrom({baseLayer}));
266 CreateIInputsFrom({replacementLayer}),
267 CreateIOutputsFrom({replacementLayer}));
270 optimizationViews.
AddSubstitution({substitutionSubgraph, replacementSubgraph});
272 return replacementLayer;
275template<
typename LayerType>
281 SubgraphView substitutionSubgraph({padLayer, baseLayer},
282 CreateIInputsFrom({padLayer}),
283 CreateIOutputsFrom({baseLayer}));
286 optimizationViews.
AddSubstitution({substitutionSubgraph, replacementSubgraph});
288 return replacementLayer;
300 for (
unsigned int i = 0; i < baseLayer->
GetOutputSlot(0).GetNumConnections(); ++i)
322 for (
unsigned int i = 0; i < baseLayer->
GetOutputSlot(0).GetNumConnections(); ++i)
336 std::map<LayerGuid, Layer*>& untouched,
339 if (baseLayer ==
nullptr)
353 for (
unsigned int i = 0; i < baseLayer->
GetOutputSlot(0).GetNumConnections(); ++i)
362 auto it = untouched.find(baseLayer->
GetGuid());
363 if (it == untouched.end())
371 for (
unsigned int i = 0; i < baseLayer->
GetOutputSlot(0).GetNumConnections(); ++i)
383template<
typename LayerT,
typename Descriptor>
386 Descriptor& descriptor,
392 "FoldPadLayer2d() called with an unsupported (LayerType, Descriptor) combination!");
395 const std::string name = std::string(
"folded-") + padLayer->
GetName() +
"-into-" + baseLayer->GetName();
396 if constexpr (std::is_same_v<LayerT, Pooling2dLayer>)
405 else if constexpr (std::is_same_v<LayerT, Convolution2dLayer> ||
406 std::is_same_v<LayerT, DepthwiseConvolution2dLayer>)
413 if constexpr (std::is_same_v<LayerT, Convolution2dLayer>)
431 "FoldPadLayer2d() called with an unsupported LayerType");
441 return layer.
GetType() == type;
451template<
typename TYPE>
457 bool handleValidActivates,
458 const std::vector<ActivationFunction>& validActivates)
460 auto PreviousLayer = [](
Layer& layer)
462 return &layer.
GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
465 auto NextLayer = [](
Layer& layer)
470 auto LayerIncomingConnectionDataType = [](
Layer& layer)
481 DataType dataType = LayerIncomingConnectionDataType(currentLayer);
484 layerList[2] = ¤tLayer;
487 Layer *prevLayer = PreviousLayer(currentLayer);
490 bool dataTypesMatch = (dataType == LayerIncomingConnectionDataType(*prevLayer));
491 if (! dataTypesMatch)
496 layerList[1] = prevLayer;
497 prevLayer = PreviousLayer(*prevLayer);
500 dataTypesMatch = (dataType == LayerIncomingConnectionDataType(*prevLayer));
501 if (! dataTypesMatch)
506 layerList[0] = prevLayer;
511 if (handleValidActivates)
513 Layer *nextLayer = NextLayer(currentLayer);
521 long count = std::count(validActivates.cbegin(),
522 validActivates.cend(),
526 layerList[3] = nextLayer;
551template<
typename LayerT,
typename BlockT>
564 void Add(LayerT* layer, BlockT* block)
566 if (HasInputs(block))
568 AddReady({layer, block});
573 m_Pending.emplace_back(
Pair{layer,block});
580 if (m_Pending.size())
582 std::stringstream stm;
583 stm <<
"[OpBlockSequencer] " << m_Pending.size();
584 stm <<
" blocks could not be processed!";
585 throw std::invalid_argument(stm.str());
590 bool HasInputs(BlockT* block)
592 for (
auto& inputTensorName : block->GetInputs())
594 if (inputTensorName.find(
"input") != std::string::npos)
599 if (inputTensorName.find(
"constant") != std::string::npos)
604 if (m_TensorMap.find(inputTensorName) == m_TensorMap.end())
612 void AddReady(
Pair&& pair)
614 m_Ready.emplace_back(pair);
615 for (
auto & outputTensor : pair.block->GetOutputs())
617 m_TensorMap[outputTensor] = 1;
621 void ProcessPending()
623 auto itr = m_Pending.begin();
624 while (itr != m_Pending.end())
626 if (HasInputs((*itr).block))
628 AddReady(std::move(*itr));
629 itr = m_Pending.erase(itr);
638 std::list<Pair> m_Ready;
639 std::list<Pair> m_Pending;
640 std::unordered_map<std::string, uint32_t> m_TensorMap;
#define ARMNN_THROW_INVALIDARG_IF_FALSE(_cond)
This layer represents a convolution 2d operation.
This layer represents a depthwise convolution 2d operation.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
virtual LayerType GetType() const =0
Returns the armnn::LayerType of this layer.
IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D convolution layer to the network.
IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D depthwise convolution layer to the network.
IConnectableLayer * AddPooling2dLayer(const Pooling2dDescriptor &pooling2dDescriptor, const char *name=nullptr)
Adds a 2D pooling layer to the network.
virtual const IInputSlot * GetConnection(unsigned int index) const =0
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
LayerGuid GetGuid() const final
Returns the unique id of the layer.
const char * GetName() const override
Returns the name of the layer.
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
const Parameters & GetParameters() const override
If the layer has a descriptor return it.
std::list< Pair > & Finish()
void Add(LayerT *layer, BlockT *block)
OpBlockSequencer()=default
~OpBlockSequencer()=default
void AddUntouchedSubgraph(SubgraphView &&subgraph)
void AddDeletedSubgraph(SubgraphView &&subgraph)
void AddSubstitution(SubstitutionPair &&substitution)
const InputSlot * GetConnection(unsigned int index) const override
Layer & GetOwningLayer() const
const TensorInfo & GetTensorInfo() const override
This layer represents a pad operation.
This layer represents a pooling 2d operation.
This layer represents a reshape operation.
Strategy base class with empty implementations.
The SubgraphView class represents a subgraph of a Graph.
std::vector< IOutputSlot * > IOutputSlots
std::vector< IInputSlot * > IInputSlots
unsigned int GetNumDimensions() const
void SetShape(const TensorShape &newShape)
DataType GetDataType() const
constexpr bool alwaysFalse
Copyright (c) 2021 ARM Limited and Contributors.
bool IsSequenceLayerType(Layer &layer, LayerType type)
bool IsNCHW(armnn::Layer &layer)
bool ConnectedToLayerType(Layer *baseLayer, LayerType layerType, unsigned int dimSize=0)
Checks the Layer's Connections to see if it's connected to a Layer with the provided layerType.
bool IsLayerSequence(Layer &currentLayer, TYPE first, TYPE second, TYPE third, Layer *layerList[4], bool handleValidActivates, const std::vector< ActivationFunction > &validActivates)
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
LayerType * ReplaceLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, LayerType *replacementLayer)
void RemoveReshapeLayer(ReshapeLayer *baseLayer, std::map< LayerGuid, Layer * > &untouched, OptimizationViews &optimizationViews)
void ReportUntouchedLayers(OptimizationViews &optimizationViews, std::map< LayerGuid, Layer * > untouched)
void FoldPadLayer2d(OptimizationViews &optimizationViews, LayerT *baseLayer, Descriptor &descriptor, PadLayer *padLayer)
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for built-in pointers only.
bool ConnectedToLayerWithNCHW(Layer *baseLayer)
Checks if the Layer is connected to any Layer that has an NCHW layout.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
LayerType * FoldPadLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, LayerType *replacementLayer, PadLayer *padLayer)
void IgnoreUnused(Ts &&...)
A Convolution2dDescriptor for the Convolution2dLayer.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A Pooling2dDescriptor for the Pooling2dLayer.
A ReshapeDescriptor for the ReshapeLayer.
TensorShape m_TargetShape
Target shape value.