#include <doctest/doctest.h>

// Graph used globally by the dummy layers below.
armnn::Graph dummyGraph;

// Makes a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    // ... returns a small dummy TensorInfo of the given DataType ...
}

// Makes a dummy WorkloadInfo holding one dummy TensorInfo per input and per output.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;
    for (unsigned int i = 0; i < numInputs; i++)  { info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>()); }
    for (unsigned int o = 0; o < numOutputs; o++) { info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>()); }
    return info;
}
// Template class to create a dummy layer: adds a layer of the given type to
// dummyGraph on construction and erases it on destruction.
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer { /* ... */ };

// Specialization for layers that take no descriptor.
template<typename LayerType>
struct DummyLayer<LayerType, void> { /* ... */ };

template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_Variance = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_Beta     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_Gamma    = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
    }
    // ...
    armnn::BatchNormalizationLayer* m_Layer;
};
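// For illustration (not in the original header): DummyLayer is an RAII helper, so
// constructing one adds a layer to dummyGraph and destroying it erases the layer
// again. A minimal sketch of how a test might use it:
//
// {
//     DummyLayer<armnn::AdditionLayer, void> addition; // layer added to dummyGraph
//     CHECK(addition.m_Layer->GetType() == armnn::LayerType::Addition);
// } // layer erased again here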
// Further specializations follow the same pattern (bodies elided):
template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer> { /* ... */ };

template<>
struct DummyLayer<armnn::ConstantLayer, void> { /* ... */ };

template<>
struct DummyLayer<armnn::ConcatLayer> { /* ... */ };

template<>
struct DummyLayer<armnn::MapLayer, void> { /* ... */ };

template<>
struct DummyLayer<armnn::SplitterLayer> { /* ... */ };

template<>
struct DummyLayer<armnn::UnmapLayer, void> { /* ... */ };
// Common base for the dummy convolution-style layers.
template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_Bias   = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};
template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer> {};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer> {};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer> {};

template<>
struct DummyLayer<armnn::DetectionPostProcessLayer>
{
    DummyLayer()
    {
        // ... create the layer, then attach dummy anchors:
        m_Layer->m_Anchors = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
    }
    // ...
};
template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        // Basic parameters:
        m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        // CIFG parameters (required because CIFG is disabled above):
        m_Layer->m_CifgParameters.m_InputToInputWeights       = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights   = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_CifgParameters.m_InputGateBias             = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
    }
    // ...
    LstmLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer> {};
template <typename UnidirectionalSequenceLstmLayerType>
struct DummyUnidirectionalSequenceLstmLayer
{
    DummyUnidirectionalSequenceLstmLayer()
    {
        typename UnidirectionalSequenceLstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<UnidirectionalSequenceLstmLayerType>(desc, "");
        // Basic parameters:
        m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        // CIFG parameters (required because CIFG is disabled above):
        m_Layer->m_CifgParameters.m_InputToInputWeights       = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights   = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_CifgParameters.m_InputGateBias             = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
    }

    ~DummyUnidirectionalSequenceLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    UnidirectionalSequenceLstmLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::UnidirectionalSequenceLstmLayer>
    : public DummyUnidirectionalSequenceLstmLayer<armnn::UnidirectionalSequenceLstmLayer> {};
template<>
struct DummyLayer<armnn::QLstmLayer>
{
    DummyLayer()
    {
        armnn::QLstmDescriptor desc;
        desc.m_CifgEnabled       = false; // input gate present, so CIFG params are required
        desc.m_PeepholeEnabled   = true;
        desc.m_ProjectionEnabled = true;
        desc.m_LayerNormEnabled  = true;

        m_Layer = dummyGraph.AddLayer<armnn::QLstmLayer>(desc, "");
        // Basic parameters:
        m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        // CIFG parameters:
        m_Layer->m_CifgParameters.m_InputToInputWeights     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_CifgParameters.m_InputGateBias           = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        // Projection parameters:
        m_Layer->m_ProjectionParameters.m_ProjectionWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_ProjectionParameters.m_ProjectionBias    = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        // Peephole parameters:
        m_Layer->m_PeepholeParameters.m_CellToInputWeights  = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        // Layer normalization parameters:
        m_Layer->m_LayerNormParameters.m_InputLayerNormWeights  = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_LayerNormParameters.m_ForgetLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_LayerNormParameters.m_CellLayerNormWeights   = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_LayerNormParameters.m_OutputLayerNormWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
    }
    // ...
    armnn::QLstmLayer* m_Layer;
};
template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
        // Input-to-gate weights:
        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights      = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        // Recurrent-to-gate weights:
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights  = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        // Gate biases:
        m_Layer->m_QuantizedLstmParameters.m_InputGateBias            = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_QuantizedLstmParameters.m_CellBias                 = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
    }
    // ...
    armnn::QuantizedLstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(/* dummy TensorInfo */);
    }
    // ...
    armnn::FullyConnectedLayer* m_Layer;
};
// Layer policy template; specialized below for every layer type.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Declares a policy with a custom descriptor type; the dummy workload is created
// through the factory.
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};

// Declares a policy whose dummy workload is constructed directly rather than
// through the factory.
#define DECLARE_LAYER_POLICY_MAP_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    using Workload = armnn::name##Workload; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = false; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory); \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return std::make_unique<armnn::name##Workload>(desc, info); \
    } \
};

// Use this version for layers whose constructor takes only a name.
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Use this version for layers whose constructor takes a descriptor and a name.
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

// Declares a policy for layers that are exceptions to the rule: no dummy workload
// is ever created for them.
#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    constexpr static const char* NameStr = #name; \
    constexpr static const bool IsException = true; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        IgnoreUnused(factory, nIn, nOut); \
        return std::unique_ptr<armnn::IWorkload>(); \
    } \
};

#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)
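// Elided in this excerpt: the header goes on to expand one of these macros per
// entry in the armnn::LayerType enum. A representative, illustrative sample (not
// the full list; the exact set of invocations is not reproduced here):
//
// DECLARE_LAYER_POLICY_2_PARAM(Activation)
// DECLARE_LAYER_POLICY_1_PARAM(Addition)
// DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
// DECLARE_LAYER_POLICY_MAP_PARAM(Map, void)
// DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(PreCompiled)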
// Tag dispatch type used to walk the LayerType enum at compile time.
template<armnn::LayerType Type>
struct Tag {};

template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer) { return layer.GetNumInputSlots(); }

template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer) { return layer.GetNumOutputSlots(); }

// Concat is tested with a fixed number of inputs.
template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    IgnoreUnused(layer);
    return 2;
}
// Tests that IsLayerSupported() returns the expected value: we try to create the
// relevant dummy workload and check whether the outcome matches the reported support.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType* factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    if (LayerPolicy::IsException) // Don't test exceptions to the rule.
    {
        return true;
    }

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Connect a dummy constant layer to each input so IsLayerSupported() sees valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);

    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }

    // Set the outputs of the tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            IgnoreUnused(e);
            // This is ok: creating the dummy workload may legitimately throw for dummy parameters.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            FAIL(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            FAIL(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported +
                               ") but found some.";
        try
        {
            // An unsupported layer is expected not to yield a workload.
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            FAIL(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            FAIL(errorMsg << layerName);
            return false;
        }
    }
}
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType* factory, Tag<armnn::LayerType::Map>)
{
    IgnoreUnused(factory);
    return true; // Map is always supported; skip the generic test.
}

template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType* factory, Tag<armnn::LayerType::Unmap>)
{
    IgnoreUnused(factory);
    return true; // Unmap is always supported; skip the generic test.
}
// Terminating case for the recursion below: test the last layer type.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType* factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive case: test this layer type, then advance to the next enum value.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType* factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}

// Entry point: walks every layer type from FirstLayer to LastLayer.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType* factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
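// For illustration (not in the original header): a backend's test suite would
// drive the full sweep with a single call, along these lines. The factory type
// and test name here are assumptions, not taken from this file:
//
// TEST_CASE("IsLayerSupportedFloat32Reference")
// {
//     armnn::RefWorkloadFactory factory;
//     CHECK(IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory));
// }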
template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}
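// For illustration (not in the original header): the type-match sweep is started
// from the first enum value, e.g.
//
// bool allMatch = LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(
//     Tag<armnn::LayerType::FirstLayer>());
// CHECK(allMatch);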
// Tests the IsLayerSupported overload for a conversion layer (e.g. Float16 <-> Float32).
template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    // ... build a graph holding input -> convert layer -> output (creation elided) ...

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
    return result;
}
// Tests IsLayerSupported for the LogicalBinary layer with two equal-shaped inputs.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
{
    // ... build a graph: two inputs -> LogicalBinary layer -> output
    //     (creation and connection of input0 elided) ...

    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
    return result;
}

// Same test, but with broadcastable (different-shaped) inputs.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
{
    // ... identical setup, except input1 is given a broadcastable shape ...

    input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);

    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
    return result;
}
// Tests IsLayerSupported for the Mean layer with a valid axis list.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    static const std::vector<unsigned> axes = {1, 0};

    // ... build a graph: input -> Mean layer (reducing over axes) -> output (creation elided) ...

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
    return result;
}

// Variant with an empty axis list, exercising the case where Mean is reported unsupported.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    static const std::vector<unsigned> axes = {};

    // ... identical setup, but with the empty axis list ...

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
    return result;
}
// Tests IsLayerSupported for a Constant layer producing OutputDataType.
template<typename FactoryType, armnn::DataType OutputDataType>
bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
{
    // ... build a graph holding a Constant layer connected to an output (creation elided) ...

    bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);
    return result;
}
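// For illustration (not in the original header): a sketch of how a backend test
// might call one of the standalone helpers above. The factory type here is an
// assumption, not taken from this file:
//
// std::string reason;
// bool supported = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
//                                                armnn::DataType::Float32>(reason);
// CHECK_MESSAGE(supported, reason);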