24.02
|
Go to the documentation of this file.
24 bool checkDataTypeInputandOutput(
const Layer& layer)
26 auto inputInfo = layer.GetInputSlot(0).GetTensorInfo();
27 auto outputInfo = layer.GetOutputSlot(0).GetTensorInfo();
28 bool sameDataType = (inputInfo.GetDataType() == outputInfo.GetDataType());
35 bool sameScale = (inputInfo.GetQuantizationScale() == outputInfo.GetQuantizationScale());
36 bool sameOffset = (inputInfo.GetQuantizationOffset() == outputInfo.GetQuantizationOffset());
38 return (sameScale && sameOffset);
53 template<
typename LayerType>
60 replacementLayer->SetAdditionalInfoForObject(
61 std::make_shared<ActivationDescriptor>(activationDesc));
63 SubgraphView substitutionSubgraph({baseLayer, activationLayer},
64 CreateIInputsFrom({baseLayer}),
65 CreateIOutputsFrom({activationLayer}));
68 optimizationViews.
AddSubstitution({substitutionSubgraph, replacementSubgraph});
70 return replacementLayer;
73 template<
typename LayerType>
83 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
91 return replacementLayer;
94 template<
typename LayerType>
104 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
112 return replacementLayer;
115 template<
typename LayerType>
125 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
133 return replacementLayer;
136 template<
typename LayerType>
146 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
154 return replacementLayer;
157 template<
typename LayerType>
167 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
175 return replacementLayer;
178 template<
typename LayerType>
192 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
200 SubgraphView substitutionSubgraph({baseLayer, activationLayer},
201 CreateIInputsFrom({baseLayer}),
202 CreateIOutputsFrom({activationLayer}));
205 return replacementLayer;
208 template<
typename LayerType>
218 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
227 return replacementLayer;
230 template<
typename LayerType>
240 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
249 return replacementLayer;
252 template<
typename LayerType>
262 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
271 return replacementLayer;
278 template<
typename LayerType>
284 std::vector<IConnectableLayer*> layers;
287 std::vector<uint32_t> axes;
288 unsigned int recalulatedAxis = 0;
290 for (
unsigned int i = 0; i != desc.
m_vAxis.size(); ++i)
293 TensorInfo layerInfo = baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
295 axes.emplace_back(desc.
m_vAxis[i]);
303 std::vector<uint32_t> singleAxis(1, desc.
m_vAxis[i] - recalulatedAxis);
307 newReduceDescriptor.
m_vAxis.assign(singleAxis.begin(), singleAxis.end());
310 std::string layerName =
"reduce_layer_" + std::to_string(i);
312 Layer* replacementLayer = PolymorphicDowncast<Layer*>(
320 layers[i - 1]->GetOutputSlot(0).Connect(replacementLayer->
GetInputSlot(0));
331 layers.emplace_back(replacementLayer);
335 ARMNN_ASSERT(baseLayer->GetOutputSlot(0).GetTensorInfo() ==
336 PolymorphicDowncast<Layer*>(layers.back())->GetOutputSlot().GetTensorInfo());
344 template<
typename LayerType>
347 std::vector<IConnectableLayer*>& layers)
349 std::list<IConnectableLayer*> replacementLayers(layers.begin(), layers.end());
352 SubgraphView replacementSubgraph(std::move(replacementLayers),
353 CreateIInputsFrom({replacementLayers.front()}),
354 CreateIOutputsFrom({replacementLayers.back()}));
356 optimizationViews.
AddSubstitution({substitutionSubgraph, replacementSubgraph});
362 template<
typename LayerType>
364 std::vector<IConnectableLayer*>& originalLayers,
366 const std::vector<SlotList> inputLayersSlotLists,
367 const std::vector<SlotList> outputLayersSlotLists)
369 std::list<IConnectableLayer*> originalLayerList(originalLayers.begin(), originalLayers.end());
372 std::move(originalLayerList),
373 CreateIInputsFromSlotLists<armnn::IConnectableLayer>(originalLayers, inputLayersSlotLists),
374 CreateIOutputsFromSlotLists<armnn::IConnectableLayer>(originalLayers, outputLayersSlotLists));
377 optimizationViews.
AddSubstitution({substitutionSubgraph, replacementSubgraph});
#define ARMNN_ASSERT(COND)
std::vector< IConnectableLayer * > ChainReduceLayers(OptimizationViews &optimizationViews, LayerType *baseLayer, ReduceDescriptor &desc)
An ActivationDescriptor for the ActivationLayer.
IConnectableLayer * AddReduceLayer(const ReduceDescriptor &reduceDescriptor, const char *name=nullptr)
Adds a reduce layer to the network.
LayerType * FuseDepthwiseConvolution2dLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
IConnectableLayer * AddAdditionLayer(const char *name=nullptr)
Adds an addition layer to the network.
void SetTensorInfo(const TensorInfo &tensorInfo) override
IConnectableLayer * AddFullyConnectedLayer(const FullyConnectedDescriptor &fullyConnectedDescriptor, const char *name=nullptr)
Adds a fully connected layer to the network.
LayerType * FuseConvolution2dLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
void ReplaceLayers(OptimizationViews &optimizationViews, LayerType *baseLayer, std::vector< IConnectableLayer * > &layers)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
LayerType * FuseAdditionLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
LayerType * FuseMultiplicationLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
IConnectableLayer * AddBatchNormalizationLayer(const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr)
Adds a batch normalization layer to the network.
LayerType * FuseBatchNormalizationLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D convolution layer to the network.
LayerType * FuseDivisionLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
The SubgraphView class represents a subgraph of a Graph.
IConnectableLayer * AddDivisionLayer(const char *name=nullptr)
Adds a division layer to the network.
LayerType * FuseFullyConnectedLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
void AddSubstitution(SubstitutionPair &&substitution)
LayerType * FuseElementwiseBinaryLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, BinaryOperation operation, std::string name)
IConnectableLayer * AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor &elementwiseBinaryDescriptor, const char *name=nullptr)
Add an ElementwiseBinary layer to the network.
bool m_KeepDims
If true, the output shape is unchanged (reduced dimensions are kept with size 1).
IConnectableLayer * AddSubtractionLayer(const char *name=nullptr)
Adds a subtraction layer to the network.
This layer represents an activation operation with the specified activation function.
#define ARMNN_NO_DEPRECATE_WARN_END
IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D depthwise convolution layer to the network.
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
LayerType * FuseSubtractionLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
constexpr bool IsQuantizedType()
Copyright (c) 2021 ARM Limited and Contributors.
IConnectableLayer * AddMultiplicationLayer(const char *name=nullptr)
Adds a multiplication layer to the network.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
const TensorInfo ComputeReductionTensorShape(const armnn::TensorInfo &input, const std::vector< uint32_t > &vAxis, const bool keepDims)
Function to compute the output tensor shape based on the axes and if keepDims is set.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
LayerType * FuseLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, LayerType *replacementLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc)
A ReduceDescriptor for the REDUCE operators.
LayerType
When adding a new layer, also adapt the LastLayer value in the LayerType enum class below.
void ReplaceMultipleLayers(OptimizationViews &optimizationViews, std::vector< IConnectableLayer * > &originalLayers, LayerType *baseLayer, const std::vector< SlotList > inputLayersSlotLists, const std::vector< SlotList > outputLayersSlotLists)