24.02
|
#include <RefLayerSupport.hpp>
|
bool | IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override |
| Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported.
|
|
bool | IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsBroadcastToSupported (const TensorInfo &input, const TensorInfo &output, const BroadcastToDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const |
|
bool | IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const |
|
bool | IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsReverseV2Supported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsTileSupported (const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
Public Member Functions inherited from LayerSupportBase
bool | IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
Definition at line 12 of file RefLayerSupport.hpp.
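Every overload listed above takes an Optional< std::string & > reasonIfUnsupported; when it is bound to a caller-owned std::string, the failing rule's message is written into that string, and EmptyOptional() can be passed when no reason is needed. A minimal sketch of the calling pattern (the include paths and tensor shapes are illustrative assumptions, not taken from this page):

    // Sketch only: query the reference backend and capture the reason on failure.
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <RefLayerSupport.hpp>

    #include <iostream>
    #include <string>

    int main()
    {
        armnn::RefLayerSupport layerSupport;

        armnn::TensorInfo input({1, 8}, armnn::DataType::Float32);
        armnn::TensorInfo output({1, 8}, armnn::DataType::Float16);   // deliberate type mismatch

        armnn::ActivationDescriptor descriptor;
        descriptor.m_Function = armnn::ActivationFunction::ReLu;

        std::string reason;
        armnn::Optional<std::string&> reasonIfUnsupported(reason);

        if (!layerSupport.IsActivationSupported(input, output, descriptor, reasonIfUnsupported))
        {
            std::cout << "Not supported: " << reason << std::endl;
        }
        return 0;
    }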
◆ IsActivationSupported()
Definition at line 553 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,6> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference activation: input type not supported.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference activation: output type not supported.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference activation: input and output types mismatched.");
supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
    "Reference activation: input and output shapes are of different rank.");

struct ActivationFunctionSupported : public Rule
{
    ActivationFunctionSupported(const ActivationDescriptor& desc)
    {
        switch (desc.m_Function) { /* ... */ }
    }
};

supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
    "Reference activation: function not supported.");
References armnn::Abs, armnn::BoundedReLu, armnn::CheckSupportRule(), armnn::Elu, armnn::Float16, armnn::Float32, armnn::Gelu, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.
Referenced by RefLayerSupport::IsLayerSupported().
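The excerpt above only shows the skeleton of the local ActivationFunctionSupported rule. As a rough sketch of the pattern used throughout RefLayerSupport.cpp (the Rule base class and its m_Res member shown here are assumptions modelled on the fragment; the real definitions live inside RefLayerSupport.cpp):

    #include <armnn/Descriptors.hpp>
    #include <armnn/Types.hpp>

    // Assumed shape of the Rule helper: a functor whose verdict is held in m_Res.
    struct Rule
    {
        bool operator()() const { return m_Res; }
        bool m_Res = true;
    };

    // A rule is constructed from the data it needs and leaves its verdict in m_Res,
    // which CheckSupportRule then evaluates and, on failure, reports via reasonIfUnsupported.
    struct ActivationFunctionSupported : public Rule
    {
        ActivationFunctionSupported(const armnn::ActivationDescriptor& desc)
        {
            switch (desc.m_Function)
            {
                // The accepted ActivationFunction values are elided in the excerpt; the
                // References line above names Abs, BoundedReLu, Elu, Gelu, HardSwish,
                // LeakyReLu, Linear, ReLu, Sigmoid, SoftReLu, Sqrt, Square and TanH.
                case armnn::ActivationFunction::ReLu:
                case armnn::ActivationFunction::Sigmoid:
                {
                    m_Res = true;
                    break;
                }
                default:
                {
                    m_Res = false;
                    break;
                }
            }
        }
    };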
◆ IsAdditionSupported()
Definition at line 621 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
    "Reference addition: input 0 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
    "Reference addition: input 1 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference addition: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
    "Reference addition: input 0 and Input 1 types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
    "Reference addition: input and output types are mismatched");
supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
    "Reference addition: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
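The ShapesAreBroadcastCompatible rule means the two inputs do not need identical shapes, only shapes that broadcast to the output shape. A small illustrative check (shapes, types and the helper function name are assumptions for illustration):

    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <RefLayerSupport.hpp>

    // Illustrative only: a {1,1,1,3} tensor broadcasts over dimensions 1 and 2
    // of a {1,2,2,3} tensor, so the pair is accepted for addition.
    bool AdditionBroadcastExample()
    {
        armnn::RefLayerSupport layerSupport;
        armnn::TensorInfo input0({1, 2, 2, 3}, armnn::DataType::Float32);
        armnn::TensorInfo input1({1, 1, 1, 3}, armnn::DataType::Float32);
        armnn::TensorInfo output({1, 2, 2, 3}, armnn::DataType::Float32);
        return layerSupport.IsAdditionSupported(input0, input1, output, armnn::EmptyOptional());
    }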
◆ IsArgMinMaxSupported()
Definition at line 658 of file RefLayerSupport.cpp.
std::array<DataType, 8> supportedInputTypes = { /* ... */ };
std::array<DataType,2> supportedOutputTypes = { /* ... */ };

bool supported = true;
supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
    "Reference ArgMinMax: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
    "Reference ArgMinMax: output type not supported");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and armnn::Signed64.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsBatchMatMulSupported()
Definition at line 690 of file RefLayerSupport.cpp.
std::array<DataType, 6> supportedTypes = { /* ... */ };

bool supported = true;
supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
    "Reference batch matrix multiplication: input X is not a supported type");
supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
    "Reference batch matrix multiplication: input Y is not a supported type");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference batch matrix multiplication: output is not a supported type");
supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
    "Reference batch matrix multiplication: input X and input Y types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
    "Reference batch matrix multiplication: inputs and output types are mismatched");
supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputX, 2), reasonIfUnsupported,
    "Reference batch matrix multiplication: input X is not of rank 2 or greater");
supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputY, 2), reasonIfUnsupported,
    "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
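TensorNumDimensionsAreGreaterOrEqualTo enforces that both operands are at least rank 2; leading batch dimensions in front of the two matrix dimensions are allowed. A hedged usage sketch (shapes and the helper function name are illustrative assumptions):

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <RefLayerSupport.hpp>

    // Illustrative only: a batched {2,3,4} x {2,4,5} -> {2,3,5} matrix multiplication.
    bool BatchMatMulExample()
    {
        armnn::RefLayerSupport layerSupport;
        armnn::TensorInfo inputX({2, 3, 4}, armnn::DataType::Float32);
        armnn::TensorInfo inputY({2, 4, 5}, armnn::DataType::Float32);
        armnn::TensorInfo output({2, 3, 5}, armnn::DataType::Float32);
        armnn::BatchMatMulDescriptor descriptor;  // defaults: no transpose/adjoint on either input
        return layerSupport.IsBatchMatMulSupported(inputX, inputY, output, descriptor,
                                                   armnn::EmptyOptional());
    }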
◆ IsBatchNormalizationSupported()
Definition at line 735 of file RefLayerSupport.cpp.
std::array<DataType, 6> supportedTypes = { /* ... */ };

bool supported = true;
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference batch normalization: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference batch normalization: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference batch normalization: input and output types are mismatched");
supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
    "Reference batch normalization: mean is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
    "Reference batch normalization: variance is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
    "Reference batch normalization: beta is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
    "Reference batch normalization: gamma is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsBatchToSpaceNdSupported()
Definition at line 781 of file RefLayerSupport.cpp.
bool supported = true;

std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
std::string inputTensorStr = "input";
std::string outputTensorStr = "output";

std::array<DataType,6> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference BatchToSpaceNd: input type not supported.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference BatchToSpaceNd: output type not supported.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference BatchToSpaceNd: input and output types mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsBroadcastToSupported()
Definition at line 816 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType, 8> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "BroadcastTo: input type not supported.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "BroadcastTo: output type not supported");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and armnn::Signed64.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsCastSupported()
Definition at line 846 of file RefLayerSupport.cpp.
std::array<DataType, 10> supportedInputTypes = { /* ... */ };

bool supported = true;
supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
    "Reference cast: input is not a supported type");
supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
    "Reference cast: output is not a supported type");
supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
    "Reference cast: input and output shapes have different number of total elements");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and armnn::Signed64.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsChannelShuffleSupported()
Definition at line 876 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType, 7> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference ChannelShuffle: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference ChannelShuffle: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference ChannelShuffle: input and output types are mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsComparisonSupported()
Definition at line 908 of file RefLayerSupport.cpp.
std::array<DataType, 8> supportedInputTypes = { /* ... */ };

bool supported = true;
supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
    "Reference comparison: input 0 is not a supported type");
supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
    "Reference comparison: input 0 and Input 1 types are mismatched");
supported &= CheckSupportRule(/* ... */, reasonIfUnsupported,
    "Reference comparison: output is not of type Boolean");
References armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsConcatSupported()
Definition at line 939 of file RefLayerSupport.cpp.
bool supported = true;
std::array<DataType,7> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference concatenation: output type not supported");
for (const TensorInfo* input : inputs)
{
    supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
        "Reference concatenation: input type not supported");
    supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
        "Reference concatenation: input and output types mismatched.");
}
References ARMNN_ASSERT, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsConstantSupported()
Definition at line 972 of file RefLayerSupport.cpp.
std::array<DataType,8> supportedTypes = { /* ... */ };

return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference constant: output is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsConvertFp16ToFp32Supported()
◆ IsConvertFp32ToFp16Supported()
◆ IsConvolution2dSupported()
Definition at line 1030 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference Convolution2d: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference Convolution2d: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference Convolution2d: input and output types mismatched.");

const DataType inputType = input.GetDataType();
if (IsQuantized8BitType(inputType))
{
    std::array<DataType, 3> supportedWeightTypes = { /* ... */ };

    supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
        "Reference Convolution2d: weights type not supported for quantized input.");
}
else
{
    supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
        "Reference Convolution2d: weights is not a supported type.");
    supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
        "Reference Convolution2d: input and weights types mismatched.");
}

if (biases.has_value())
{
    std::array<DataType,4> biasesSupportedTypes = { /* ... */ };

    supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
        "Reference Convolution2d: biases is not a supported type.");
}
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
Referenced by RefLayerSupport::IsLayerSupported().
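For quantized inputs the weights must come from the narrower supportedWeightTypes list (its three entries are elided in the excerpt), and biases, when present, must be one of the bias-compatible types such as Signed32. A hedged usage sketch; all shapes, scales, offsets and the helper function name are illustrative assumptions:

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <RefLayerSupport.hpp>

    // Illustrative only: an asymmetric-quantized 3x3 convolution, NHWC, with a Signed32 bias.
    bool QuantizedConv2dExample()
    {
        armnn::RefLayerSupport layerSupport;

        armnn::TensorInfo input({1, 8, 8, 4},    armnn::DataType::QAsymmS8, 0.05f, 0);
        armnn::TensorInfo output({1, 8, 8, 16},  armnn::DataType::QAsymmS8, 0.1f, 0);
        armnn::TensorInfo weights({16, 3, 3, 4}, armnn::DataType::QAsymmS8, 0.02f, 0, true);
        armnn::TensorInfo biases({16},           armnn::DataType::Signed32, 0.05f * 0.02f, 0, true);

        armnn::Convolution2dDescriptor descriptor;
        descriptor.m_DataLayout  = armnn::DataLayout::NHWC;
        descriptor.m_StrideX     = 1;
        descriptor.m_StrideY     = 1;
        descriptor.m_PadLeft     = 1;
        descriptor.m_PadRight    = 1;
        descriptor.m_PadTop      = 1;
        descriptor.m_PadBottom   = 1;
        descriptor.m_BiasEnabled = true;

        return layerSupport.IsConvolution2dSupported(input, output, descriptor, weights,
                                                     armnn::Optional<armnn::TensorInfo>(biases),
                                                     armnn::EmptyOptional());
    }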
◆ IsConvolution3dSupported()
Definition at line 1099 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference Convolution3d: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference Convolution3d: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference Convolution3d: input and output types mismatched.");

const DataType inputType = input.GetDataType();
if (IsQuantized8BitType(inputType))
{
    std::array<DataType, 3> supportedWeightTypes = { /* ... */ };

    supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
        "Reference Convolution3d: weights type not supported for quantized input.");
}
else
{
    supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
        "Reference Convolution3d: weights is not a supported type.");
    supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
        "Reference Convolution3d: input and weights types mismatched.");
}

if (biases.has_value())
{
    std::array<DataType,4> biasesSupportedTypes = { /* ... */ };

    supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
        "Reference Convolution3d: biases is not a supported type.");
}
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsDebugSupported()
Definition at line 1167 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType, 8> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference for Debug layer: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference for Debug layer: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference for Debug layer: input and output types are mismatched");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsDepthToSpaceSupported()
Definition at line 1197 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,6> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference DepthToSpace: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference DepthToSpace: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference DepthToSpace: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsDepthwiseConvolutionSupported()
Definition at line 1226 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference DepthwiseConvolution2d: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference DepthwiseConvolution2d: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference DepthwiseConvolution2d: input and output types mismatched.");

const DataType inputType = input.GetDataType();
if (IsQuantized8BitType(inputType))
{
    std::array<DataType, 3> supportedWeightTypes = { /* ... */ };

    supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
        "Reference DepthwiseConvolution2d: weights type not supported for quantized input.");
}
else
{
    supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
        "Reference DepthwiseConvolution2d: weights is not a supported type.");
    supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
        "Reference DepthwiseConvolution2d: input and weights types mismatched.");
}

if (biases.has_value())
{
    std::array<DataType,4> biasesSupportedTypes = { /* ... */ };

    supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
        "Reference DepthwiseConvolution2d: biases is not a supported type.");
}
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(), and RefLayerSupport::IsLayerSupported().
◆ IsDequantizeSupported()
Definition at line 1295 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,5> supportedInputTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
    "Reference for Dequantize layer: input type not supported.");
supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
    "Reference for Dequantize layer: per-axis quantized input not supported.");

std::array<DataType,3> supportedOutputTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
    "Reference for Dequantize layer: output type not supported.");
supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
    "Reference for Dequantize layer: input/output shapes have different num total elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.
Referenced by RefLayerSupport::IsLayerSupported().
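TypeNotPerAxisQuantized rejects tensors that carry per-axis (per-channel) quantization scales, so a Dequantize whose input uses multiple scales is reported as unsupported. An illustrative sketch; the multi-scale TensorInfo constructor used here, the shapes and the helper function name are assumptions about the armnn API, not taken from this page:

    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <RefLayerSupport.hpp>

    #include <string>
    #include <vector>

    // Illustrative only: per-tensor quantization passes the rule, per-axis does not.
    bool DequantizeExamples(std::string& reason)
    {
        armnn::RefLayerSupport layerSupport;
        armnn::Optional<std::string&> reasonIfUnsupported(reason);

        armnn::TensorInfo output({4, 2}, armnn::DataType::Float32);

        // Per-tensor quantization: one scale/offset for the whole tensor.
        armnn::TensorInfo perTensor({4, 2}, armnn::DataType::QAsymmS8, 0.1f, 0);
        bool perTensorOk = layerSupport.IsDequantizeSupported(perTensor, output, reasonIfUnsupported);

        // Per-axis quantization: one scale per channel along dimension 0 (assumed constructor).
        std::vector<float> scales = {0.1f, 0.2f, 0.05f, 0.3f};
        armnn::TensorInfo perAxis({4, 2}, armnn::DataType::QSymmS8, scales, 0);
        bool perAxisOk = layerSupport.IsDequantizeSupported(perAxis, output, reasonIfUnsupported);

        return perTensorOk && !perAxisOk;  // expected outcome per the TypeNotPerAxisQuantized rule
    }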
◆ IsDetectionPostProcessSupported()
Definition at line 1330 of file RefLayerSupport.cpp.
IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);

bool supported = true;

std::array<DataType,6> supportedInputTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
    "Reference DetectionPostProcess: input 0 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
    "Reference DetectionPostProcess: input 1 is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsDilatedDepthwiseConvolutionSupported()
◆ IsDivisionSupported()
Definition at line 1372 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
    "Reference division: input 0 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
    "Reference division: input 1 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference division: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
    "Reference division: input 0 and Input 1 types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
    "Reference division: input and output types are mismatched");
supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
    "Reference division: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsElementwiseUnarySupported()
Definition at line 1409 of file RefLayerSupport.cpp.
std::array<DataType, 7> supportedTypes = { /* ... */ };
std::array<DataType, 1> logicalSupportedTypes = { /* ... */ };

bool supported = true;

if (descriptor.m_Operation == UnaryOperation::LogicalNot)
{
    supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
        "Reference elementwise unary: input type not supported");
    supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
        "Reference elementwise unary: output type not supported");
}
else
{
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
        "Reference elementwise unary: input type not supported");
    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
        "Reference elementwise unary: output type not supported");
}

supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference elementwise unary: input and output types not matching");
supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
    "Reference elementwise unary: input and output shapes have different number of total elements");
References armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsFakeQuantizationSupported()
◆ IsFillSupported()
◆ IsFloorSupported()
Definition at line 1503 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,3> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference Floor: input type not supported.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference Floor: output type not supported.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsFullyConnectedSupported()
Definition at line 1525 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,6> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference Fully Connected: input type not supported.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference Fully Connected: output type not supported.");
supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
    "Reference Fully Connected: weights type not supported.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference Fully Connected: input and output types mismatched.");
supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
    "Reference Fully Connected: weights is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
    "Reference Fully Connected: input and weights types mismatched.");

if (descriptor.m_BiasEnabled)
{
    std::array<DataType, 5> supportedBiasTypes = { /* ... */ };

    supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
        "Reference Fully Connected: bias type not supported.");
    supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
        "Reference Fully Connected: bias and weight types mismatch.");
    supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
        "Reference Fully Connected: bias type inferred from weights is incompatible.");
    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
        "Reference Fully Connected: bias must have 1 dimension.");
}
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, FullyConnectedDescriptor::m_BiasEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
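When m_BiasEnabled is set, the bias must be a 1-D tensor (TensorNumDimensionsAreCorrect(biases, 1U)) of a type compatible with the weights. A hedged sketch using the signature listed above; the shapes and helper function name are assumptions:

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <RefLayerSupport.hpp>

    // Illustrative only: a Float32 fully connected layer with a 1-D bias.
    // A rank-2 bias such as {1, 64} would fail the 1-dimension rule.
    bool FullyConnectedExample()
    {
        armnn::RefLayerSupport layerSupport;

        armnn::TensorInfo input({1, 128},    armnn::DataType::Float32);
        armnn::TensorInfo output({1, 64},    armnn::DataType::Float32);
        armnn::TensorInfo weights({128, 64}, armnn::DataType::Float32, 0.0f, 0, true);
        armnn::TensorInfo biases({64},       armnn::DataType::Float32, 0.0f, 0, true);

        armnn::FullyConnectedDescriptor descriptor;
        descriptor.m_BiasEnabled = true;

        return layerSupport.IsFullyConnectedSupported(input, output, weights, biases,
                                                      descriptor, armnn::EmptyOptional());
    }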
◆ IsGatherNdSupported()
Definition at line 1591 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
    "Reference GatherNd: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference GatherNd: output type not supported");
supported &= CheckSupportRule(/* ... */, reasonIfUnsupported,
    "Reference GatherNd: indices (input1) type not supported");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
    "Reference GatherNd: input and output types not matching");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsGatherSupported()
Definition at line 1622 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = { /* ... */ };

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
    "Reference Gather: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference Gather: output type not supported");
supported &= CheckSupportRule(/* ... */, reasonIfUnsupported,
    "Reference Gather: indices (input1) type not supported");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
    "Reference Gather: input and output types not matching");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsInputSupported()
◆ IsInstanceNormalizationSupported()
Definition at line 1661 of file RefLayerSupport.cpp.
std::array<DataType, 3> supportedTypes = { /* ... */ };

bool supported = true;
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference Instance Normalization: input type not supported.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference Instance Normalization: output type not supported.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference Instance Normalization: input and output types mismatched.");
supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
    "Reference Instance Normalization: input and output shapes have different num total elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsL2NormalizationSupported()
Definition at line 1692 of file RefLayerSupport.cpp.
std::array<DataType, 6> supportedTypes = { /* ... */ };

bool supported = true;
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
    "Reference L2normalization: input type not supported.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
    "Reference L2normalization: output type not supported.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
    "Reference L2normalization: input and output types mismatched.");
supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
    "Reference L2normalization: input and output shapes have different num total elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsLayerSupported()
Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported.
Reimplemented from LayerSupportBase.
Definition at line 61 of file RefLayerSupport.cpp.
The excerpt is a switch over `type` that forwards each LayerType to the matching IsXXXSupported overload, downcasting `descriptor` to the layer-specific descriptor with PolymorphicDowncast (for example `*(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor))`) and passing the entries of `infos` in the order each overload expects. The cases visible in the excerpt that do more than a plain forward are:

Concat and Stack collect `&infos[0]` .. `&infos[infos.size() - 2]` into a vector of input TensorInfo pointers and pass `infos[infos.size() - 1]` as the output.

Convolution2d, DepthwiseConvolution2d, Convolution3d and TransposeConvolution2d require exactly four TensorInfos and otherwise throw, e.g. InvalidArgumentException("Invalid number of Convolution2d TensorInfos. TensorInfos should be of format: {input, output, weights, biases}."). If `infos[3] == TensorInfo()` the layer is checked without biases; otherwise `infos[3]` is passed as the optional biases.

ElementwiseBinary is validated inline against a list of supported DataTypes: `infos[0]`, `infos[1]` and `infos[2]` must each be a supported type, `infos[0]` and `infos[1]` must be of equal type, and `infos[0]` must also match the output type `infos[2]`.

Splitter passes `infos[0]` as the input and collects `infos[1]` .. `infos[infos.size() - 1]` as the output TensorInfos.

UnidirectionalSequenceLstm requires six TensorInfos and otherwise throws InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos should be of format: {input, outputStateIn, cellStateIn, hiddenStateOutputVal, cellStateOutputVal, output}"). Lstm, QLstm and UnidirectionalSequenceLstm pass `lstmParamsInfo.value()`, and QuantizedLstm passes `quantizedLstmInputParamsInfo.value()`, to their respective overloads.
References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::BroadcastTo, armnn::Cast, armnn::ChannelShuffle, armnn::CheckSupportRule(), armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Float16, armnn::Float32, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::Input, armnn::InstanceNormalization, RefLayerSupport::IsActivationSupported(), RefLayerSupport::IsAdditionSupported(), RefLayerSupport::IsArgMinMaxSupported(), RefLayerSupport::IsBatchMatMulSupported(), RefLayerSupport::IsBatchNormalizationSupported(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsBroadcastToSupported(), RefLayerSupport::IsCastSupported(), RefLayerSupport::IsChannelShuffleSupported(), RefLayerSupport::IsComparisonSupported(), RefLayerSupport::IsConcatSupported(), RefLayerSupport::IsConstantSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDebugSupported(), RefLayerSupport::IsDepthToSpaceSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), RefLayerSupport::IsDequantizeSupported(), RefLayerSupport::IsDetectionPostProcessSupported(), RefLayerSupport::IsDivisionSupported(), RefLayerSupport::IsElementwiseUnarySupported(), RefLayerSupport::IsFakeQuantizationSupported(), RefLayerSupport::IsFillSupported(), RefLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), RefLayerSupport::IsGatherNdSupported(), RefLayerSupport::IsGatherSupported(), RefLayerSupport::IsInputSupported(), RefLayerSupport::IsInstanceNormalizationSupported(), RefLayerSupport::IsL2NormalizationSupported(), RefLayerSupport::IsLogicalBinarySupported(), RefLayerSupport::IsLogSoftmaxSupported(), RefLayerSupport::IsLstmSupported(), RefLayerSupport::IsMaximumSupported(), RefLayerSupport::IsMeanSupported(), RefLayerSupport::IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), RefLayerSupport::IsMinimumSupported(), RefLayerSupport::IsMultiplicationSupported(), RefLayerSupport::IsNormalizationSupported(), RefLayerSupport::IsOutputSupported(), RefLayerSupport::IsPadSupported(), RefLayerSupport::IsPermuteSupported(), RefLayerSupport::IsPooling2dSupported(), RefLayerSupport::IsPooling3dSupported(), RefLayerSupport::IsPreluSupported(), RefLayerSupport::IsQLstmSupported(), LayerSupportBase::IsQuantizedLstmSupported(), RefLayerSupport::IsQuantizeSupported(), RefLayerSupport::IsRankSupported(), RefLayerSupport::IsReduceSupported(), RefLayerSupport::IsReshapeSupported(), RefLayerSupport::IsResizeSupported(), RefLayerSupport::IsReverseV2Supported(), RefLayerSupport::IsShapeSupported(), RefLayerSupport::IsSliceSupported(), RefLayerSupport::IsSoftmaxSupported(), RefLayerSupport::IsSpaceToBatchNdSupported(), RefLayerSupport::IsSpaceToDepthSupported(), RefLayerSupport::IsSplitterSupported(), RefLayerSupport::IsStackSupported(), RefLayerSupport::IsStridedSliceSupported(), RefLayerSupport::IsSubtractionSupported(), RefLayerSupport::IsTileSupported(), 
RefLayerSupport::IsTransposeConvolution2dSupported(), RefLayerSupport::IsTransposeSupported(), RefLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QLstm, armnn::QSymmS16, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::ReverseV2, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Tile, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
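This page does not ship a usage example for the generic entry point, so the following is only an illustrative, non-normative sketch: the tensor shapes, the choice of an Activation layer with ReLu, and the surrounding main() are invented for the example, and the infos vector is assumed to carry the layer's input followed by its output.

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <RefLayerSupport.hpp>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    using namespace armnn;

    // Illustrative 1x2x2x2 Float32 tensors for an Activation layer.
    TensorInfo input ({ 1, 2, 2, 2 }, DataType::Float32);
    TensorInfo output({ 1, 2, 2, 2 }, DataType::Float32);

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    RefLayerSupport layerSupport;
    std::string reason;

    // The infos vector is assumed to hold the layer's tensors in the order
    // input, output for an Activation layer.
    bool supported = layerSupport.IsLayerSupported(LayerType::Activation,
                                                   { input, output },
                                                   descriptor,
                                                   EmptyOptional(),   // no LstmInputParamsInfo
                                                   EmptyOptional(),   // no QuantizedLstmInputParamsInfo
                                                   Optional<std::string&>(reason));

    std::cout << (supported ? "supported" : reason) << std::endl;
    return 0;
}

If the check fails, the reason string carries the message produced by the corresponding CheckSupportRule call.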
◆ IsLogicalBinarySupported()
Definition at line 1726 of file RefLayerSupport.cpp.
std::array<DataType, 1> supportedTypes =
{
    DataType::Boolean
};

bool supported = true;

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                              "Reference LogicalBinary: input 0 type not supported");
supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                              "Reference LogicalBinary: input 1 type not supported");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                              "Reference LogicalBinary: input and output types do not match");
References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().
Referenced by RefLayerSupport::IsLayerSupported().
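As a small, non-normative illustration of the Boolean-only rule above (the 2x2 shapes and the LogicalAnd operation are arbitrary choices for the sketch):

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <RefLayerSupport.hpp>
#include <string>

void CheckLogicalAnd()
{
    using namespace armnn;

    TensorInfo input0({ 2, 2 }, DataType::Boolean);
    TensorInfo input1({ 2, 2 }, DataType::Boolean);
    TensorInfo output({ 2, 2 }, DataType::Boolean);

    LogicalBinaryDescriptor descriptor(LogicalBinaryOperation::LogicalAnd);

    RefLayerSupport layerSupport;
    std::string reason;

    // Passes: all three tensors are Boolean. A Float32 operand would fail the
    // TypeAnyOf rule with "Reference LogicalBinary: input 0 type not supported".
    bool supported = layerSupport.IsLogicalBinarySupported(input0, input1, output, descriptor,
                                                           Optional<std::string&>(reason));
    (void)supported;
}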
◆ IsLogSoftmaxSupported()
Definition at line 1751 of file RefLayerSupport.cpp.
std::array<DataType, 3> supportedTypes =
{
    DataType::Float32,
    DataType::Float16
};

bool supported = true;

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference LogSoftmax: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference LogSoftmax: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference LogSoftmax: input and output types do not match");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsLstmSupported()
Definition at line 1777 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,3> supportedTypes = {
    DataType::Float32,
    DataType::QSymmS16
};

// check inputs and outputs
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference Lstm: input is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
                              "Reference Lstm: input and outputStateIn types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
                              "Reference Lstm: input and cellStateIn types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
                              "Reference Lstm: input and scratchBuffer types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
                              "Reference Lstm: input and outputStateOut types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
                              "Reference Lstm: input and cellStateOut types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference Lstm: input and output types are mismatched");

// check layer parameters
supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
                              "Reference Lstm: input and InputToForgetWeights types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
                              "Reference Lstm: input and InputToCellWeights types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
                              "Reference Lstm: input and InputToOutputWeights types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
                              "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
                              "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
                              "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
                              "Reference Lstm: input and ForgetGateBias types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
                              "Reference Lstm: input and CellBias types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
                              "Reference Lstm: input and OutputGateBias types are mismatched");

if (!descriptor.m_CifgEnabled)
{
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and InputToInputWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
                                  reasonIfUnsupported,
                                  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
                                  "Reference Lstm: input and InputGateBias types are mismatched");
    if (descriptor.m_PeepholeEnabled)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and CellToInputWeights types are mismatched");
    }
}
if (descriptor.m_PeepholeEnabled)
{
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and CellToForgetWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and CellToOutputWeights types are mismatched");
}
if (descriptor.m_ProjectionEnabled)
{
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
                                  "Reference Lstm: input and mProjectionWeights types are mismatched");
    if (paramsInfo.m_ProjectionBias != nullptr)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
                                      "Reference Lstm: input and ProjectionBias types are mismatched");
    }
}
if (descriptor.m_LayerNormEnabled)
{
    if (!descriptor.m_CifgEnabled)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
                                      reasonIfUnsupported,
                                      "Reference Lstm: input and InputLayerNormWeights types are mismatched");
    }
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
                                  reasonIfUnsupported,
                                  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
                                  reasonIfUnsupported,
                                  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
    supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
                                  reasonIfUnsupported,
                                  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
}
References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsMaximumSupported()
Definition at line 1888 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = {
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16,
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                              "Reference maximum: input 0 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                              "Reference maximum: input 1 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference maximum: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                              "Reference maximum: input 0 and Input 1 types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                              "Reference maximum: input and output types are mismatched");
supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                              "Reference maximum: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsMeanSupported()
Definition at line 1925 of file RefLayerSupport.cpp.
bool supported = true;
std::string meanLayerStr = "Mean";
std::string outputTensorStr = "output";

std::array<DataType,6> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference Mean: input type not supported.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference Mean: input and output types are mismatched");

if (descriptor.m_KeepDims)
{
    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
                                  reasonIfUnsupported,
                                  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
                                                                    output.GetNumDimensions(),
                                                                    meanLayerStr, outputTensorStr).data());
}
else if (descriptor.m_Axis.empty())
{
    supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
                                  reasonIfUnsupported,
                                  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
                                                                    meanLayerStr, outputTensorStr).data());
}
else
{
    auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());

    if (outputDim > 0)
    {
        supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
                                      reasonIfUnsupported,
                                      CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
                                                                        meanLayerStr, outputTensorStr).data());
    }
    else
    {
        supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
                                      reasonIfUnsupported,
                                      CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
                                                                        meanLayerStr, outputTensorStr).data());
    }
}
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
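A short, non-normative sketch of the dimension rule above; the 1x3x4x4 shape and the reduction axes are invented for the example. With m_KeepDims disabled and two axes reduced, the output rank must be 4 - 2 = 2:

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <RefLayerSupport.hpp>
#include <string>

void CheckMean()
{
    using namespace armnn;

    // Reduce a 1x3x4x4 tensor over its two trailing axes.
    TensorInfo input({ 1, 3, 4, 4 }, DataType::Float32);

    MeanDescriptor descriptor;
    descriptor.m_Axis     = { 2, 3 };
    descriptor.m_KeepDims = false;

    // Output rank = input rank - number of reduced axes = 2.
    TensorInfo output({ 1, 3 }, DataType::Float32);

    RefLayerSupport layerSupport;
    std::string reason;
    bool supported = layerSupport.IsMeanSupported(input, output, descriptor,
                                                  Optional<std::string&>(reason));
    // A 4-D output here would fail TensorNumDimensionsAreCorrect and 'reason'
    // would carry the generated dimensions message.
    (void)supported;
}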
◆ IsMemCopySupported()
Definition at line 1987 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes =
{
    DataType::BFloat16,
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16,
    DataType::Boolean
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference MemCopy: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference MemCopy: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference MemCopy: input and output types are mismatched");
References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsMinimumSupported()
Definition at line 2016 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = {
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16,
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                              "Reference minimum: input 0 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                              "Reference minimum: input 1 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference minimum: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                              "Reference minimum: input 0 and Input 1 types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                              "Reference minimum: input and output types are mismatched");
supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                              "Reference minimum: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsMultiplicationSupported()
Definition at line 2053 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = {
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16,
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                              "Reference multiplication: input 0 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                              "Reference multiplication: input 1 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference multiplication: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                              "Reference multiplication: input 0 and Input 1 types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                              "Reference multiplication: input and output types are mismatched");
supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                              "Reference multiplication: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsNormalizationSupported()
Definition at line 2090 of file RefLayerSupport.cpp.
std::array<DataType, 6> supportedTypes =
{
    DataType::Float16,
    DataType::Float32,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

bool supported = true;

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference normalization: input type not supported.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference normalization: output type not supported.");
supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
                              "Reference normalization: input and output shapes have different "
                              "num total elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsOutputSupported()
◆ IsPadSupported()
Definition at line 2128 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,6> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference pad: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference pad: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference pad: input and output types are mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsPermuteSupported()
Definition at line 2158 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType, 6> supportedTypes =
{
    DataType::BFloat16,
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference permute: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference permute: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference permute: input and output types are mismatched.");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsPooling2dSupported()
Definition at line 2189 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,6> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference poolind2d: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference poolind2d: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference poolind2d: input and output types are mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsPooling3dSupported()
Definition at line 2219 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,6> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference poolind3d: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference poolind3d: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference poolind3d: input and output types are mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsPreluSupported()
Definition at line 2708 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType, 6> supportedTypes
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "PReLU: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
                              "PReLU: alpha is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "PReLU: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
                              "PReLU: input, alpha and output types are mismatched");
supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
                              "PReLU: shapes are not suitable for implicit broadcast");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
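A brief, non-normative sketch of the broadcast rule; the NHWC-style shapes are invented, with alpha broadcasting over the batch and spatial dimensions:

#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <RefLayerSupport.hpp>
#include <string>

void CheckPrelu()
{
    using namespace armnn;

    // Per-channel alpha that broadcasts over N, H and W.
    TensorInfo input ({ 1, 4, 4, 3 }, DataType::Float32);
    TensorInfo alpha ({ 1, 1, 1, 3 }, DataType::Float32);
    TensorInfo output({ 1, 4, 4, 3 }, DataType::Float32);

    RefLayerSupport layerSupport;
    std::string reason;
    bool supported = layerSupport.IsPreluSupported(input, alpha, output,
                                                   Optional<std::string&>(reason));
    // An alpha shape such as {1, 1, 1, 5} would instead fail the
    // ShapesAreBroadcastCompatible rule.
    (void)supported;
}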
◆ IsQLstmSupported()
◆ IsQuantizeSupported()
Definition at line 2274 of file RefLayerSupport.cpp.
bool supported = true;

// Define supported input types.
std::array<DataType,7> supportedInputTypes = {
    DataType::Float32, DataType::Float16,
    DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                              "Reference quantize: input type not supported.");

// Define supported output types.
std::array<DataType,4> supportedOutputTypes = {
    DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                              "Reference quantize: output type not supported.");

supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
                              "Reference quantize: input and output shapes have different num total elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.
Referenced by RefLayerSupport::IsLayerSupported().
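A brief, non-normative sketch; the shapes and the quantization scale/offset are invented for the example, and as the listing above suggests the check only compares data types and total element counts:

#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <RefLayerSupport.hpp>
#include <string>

void CheckQuantize()
{
    using namespace armnn;

    // Float32 activations quantized to QAsymmU8; scale/offset are illustrative.
    TensorInfo input ({ 1, 8 }, DataType::Float32);
    TensorInfo output({ 1, 8 }, DataType::QAsymmU8, 0.05f, 128);

    RefLayerSupport layerSupport;
    std::string reason;
    bool supported = layerSupport.IsQuantizeSupported(input, output, Optional<std::string&>(reason));
    // Both tensors describe 8 elements, so ShapesAreSameTotalSize passes;
    // a {1, 4} output would be rejected.
    (void)supported;
}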
◆ IsRankSupported()
◆ IsReduceSupported()
Definition at line 2324 of file RefLayerSupport.cpp.
bool supported = true;
std::array<DataType,7> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16,
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference Reduce: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference Reduce: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference Reduce: input and output types not matching");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsReshapeSupported()
Definition at line 2353 of file RefLayerSupport.cpp.
std::array<DataType,8> supportedOutputTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16,
                                                DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16,
                                                DataType::Signed32, DataType::Boolean };

return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
                        "Reference reshape: input type not supported.");
References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsResizeSupported()
Definition at line 2377 of file RefLayerSupport.cpp.
bool supported = true;
std::array<DataType,7> supportedTypes =
{
    DataType::BFloat16,
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference Resize: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference Resize: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference Resize: input and output types not matching");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsReverseV2Supported()
Definition at line 2407 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,8> supportedTypes =
{
    DataType::BFloat16,
    DataType::Float16,
    DataType::Float32,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS8,
    DataType::QSymmS16,
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                              "Reference ReverseV2: input0 type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference ReverseV2: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                              "Reference ReverseV2: input0 and output types not matching");

std::array<DataType,6> input2SupportedTypes =
{
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input1, input2SupportedTypes), reasonIfUnsupported,
                              "Reference ReverseV2: input1 type not supported");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsShapeSupported()
◆ IsSliceSupported()
Definition at line 2464 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType, 5> supportedTypes =
{
    DataType::Float32,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16,
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference Slice: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference Slice: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference Slice: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSoftmaxSupported()
Definition at line 2493 of file RefLayerSupport.cpp.
bool supported = true;
std::array<DataType,7> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference Softmax: output type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference Softmax: input type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference Softmax: input type not supported");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSpaceToBatchNdSupported()
Definition at line 2522 of file RefLayerSupport.cpp.
bool supported = true;
std::array<DataType,6> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference SpaceToBatchNd: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference SpaceToBatchNd: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference SpaceToBatchNd: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSpaceToDepthSupported()
Definition at line 2550 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,6> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference SpaceToDepth: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference SpaceToDepth: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference SpaceToDepth: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSplitterSupported()
Definition at line 2580 of file RefLayerSupport.cpp.
bool supported = true;
std::array<DataType,6> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference splitter: output type not supported");
for (const TensorInfo& output : outputs)
{
    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                  "Reference splitter: input type not supported");

    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                  "Reference splitter: input and output types mismatched.");
}
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsStackSupported()
Definition at line 2610 of file RefLayerSupport.cpp.
bool supported = true;
std::array<DataType,7> supportedTypes =
{
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16,
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference stack: output type not supported");
for (const TensorInfo* input : inputs)
{
    ARMNN_ASSERT(input != nullptr);

    supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
                                  "Reference stack: input type not supported");

    supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
                                  "Reference stack: input and output types mismatched.");
}
References ARMNN_ASSERT, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
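A brief, non-normative sketch; the 3x4 input shapes, the stacking axis and the resulting 2x3x4 output are invented for the example:

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <RefLayerSupport.hpp>
#include <string>
#include <vector>

void CheckStack()
{
    using namespace armnn;

    // Two 3x4 tensors stacked along a new leading axis into a 2x3x4 output.
    TensorInfo input0({ 3, 4 }, DataType::Float32);
    TensorInfo input1({ 3, 4 }, DataType::Float32);
    TensorInfo output({ 2, 3, 4 }, DataType::Float32);

    StackDescriptor descriptor;
    descriptor.m_Axis       = 0;
    descriptor.m_NumInputs  = 2;
    descriptor.m_InputShape = TensorShape({ 3, 4 });

    std::vector<const TensorInfo*> inputs = { &input0, &input1 };

    RefLayerSupport layerSupport;
    std::string reason;
    bool supported = layerSupport.IsStackSupported(inputs, output, descriptor,
                                                   Optional<std::string&>(reason));
    (void)supported;
}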
◆ IsStridedSliceSupported()
Definition at line 2643 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,5> supportedTypes =
{
    DataType::Float32,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference StridedSlice: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference StridedSlice: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference StridedSlice: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSubtractionSupported()
Definition at line 2671 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes = {
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16,
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
                              "Reference subtraction: input 0 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
                              "Reference subtraction: input 1 is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference subtraction: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
                              "Reference subtraction: input 0 and Input 1 types are mismatched");
supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
                              "Reference subtraction: input and output types are mismatched");
supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
                              "Reference subtraction: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsTileSupported()
Definition at line 2742 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType, 7> supportedTypes
{
    DataType::Float32, DataType::Float16, DataType::QAsymmS8, DataType::QAsymmU8,
    DataType::QSymmS8, DataType::QSymmS16, DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Tile: input type not supported.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Tile: output type not supported");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsTransposeConvolution2dSupported()
Definition at line 2771 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType,7> supportedTypes =
{
    DataType::Float32, DataType::Float16,
    DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, DataType::QSymmS16,
    DataType::Signed32
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference TransposeConvolution2d: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference TransposeConvolution2d: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference TransposeConvolution2d: input and output types mismatched.");

const DataType inputType = input.GetDataType();
if (IsQuantized8BitType(inputType))
{
    std::array<DataType, 3> supportedWeightTypes =
    {
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8
    };

    supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: weights type not supported for "
                                  "quantized input.");
}
else
{
    supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: weights is not a supported type.");
    supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: input and weights types mismatched.");
}

if (biases.has_value())
{
    std::array<DataType,4> biasesSupportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::Signed32
    };

    supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                  "Reference TransposeConvolution2d: biases is not a supported type.");
}
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
Referenced by RefLayerSupport::IsLayerSupported().
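A brief, non-normative sketch of the quantized-weights rule; the shapes, strides and quantization parameters are invented for the example, and as the listing above suggests only data types are inspected here:

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <RefLayerSupport.hpp>
#include <string>

void CheckTransposeConv2d()
{
    using namespace armnn;

    // Quantized 8-bit input: the weights must use one of the 8-bit quantized
    // types and the optional bias must be Signed32.
    TensorInfo input  ({ 1,  8,  8, 16 }, DataType::QAsymmU8, 0.1f, 0);
    TensorInfo output ({ 1, 16, 16,  8 }, DataType::QAsymmU8, 0.1f, 0);
    TensorInfo weights({ 8,  3,  3, 16 }, DataType::QSymmS8, 0.05f, 0);
    TensorInfo biases ({ 8 }, DataType::Signed32, 0.005f, 0);

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = DataLayout::NHWC;

    RefLayerSupport layerSupport;
    std::string reason;
    bool supported = layerSupport.IsTransposeConvolution2dSupported(
        input, output, descriptor, weights, Optional<TensorInfo>(biases),
        Optional<std::string&>(reason));
    // Float16 weights with this quantized input would instead be reported as
    // "weights type not supported for quantized input."
    (void)supported;
}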
◆ IsTransposeSupported()
Definition at line 2839 of file RefLayerSupport.cpp.
bool supported = true;

std::array<DataType, 6> supportedTypes =
{
    DataType::BFloat16,
    DataType::Float32,
    DataType::Float16,
    DataType::QAsymmS8,
    DataType::QAsymmU8,
    DataType::QSymmS16
};

supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference transpose: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference transpose: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                              "Reference transpose: input and output types are mismatched.");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsUnidirectionalSequenceLstmSupported()
Definition at line 2870 of file RefLayerSupport.cpp.
bool supported = true;

// Define supported types
std::array<DataType, 2> supportedTypes =
{
    DataType::Float32,
    DataType::QAsymmS8
};

std::array<DataType, 2> supportedWeightTypes =
{
    DataType::Float32,
    DataType::QAsymmS8
};

std::array<DataType, 3> supportedBiasTypes =
{
    DataType::Float32,
    DataType::QAsymmS8,
    DataType::Signed32
};

// check inputs and outputs
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: output is not a supported type.");

// check layer parameters
supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
                              reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
                              "is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
                              reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
                              reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
                              "is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
                              reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
                              "is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
                              reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
                              "is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
                              reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
                              "is not a supported type.");

supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
                              "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");

if (!descriptor.m_CifgEnabled)
{
    supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
                                  reasonIfUnsupported,
                                  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
                                  "is not a supported type.");
    supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
                                  reasonIfUnsupported,
                                  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
                                  "is not a supported type.");
    supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
                                  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
    if (descriptor.m_PeepholeEnabled)
    {
        supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
                                      reasonIfUnsupported,
                                      "Reference UnidirectionalSequenceLstm: CellToInputWeights "
                                      "is not a supported type.");
    }
}
if (descriptor.m_PeepholeEnabled)
{
    supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
                                  reasonIfUnsupported,
                                  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
                                  "is not a supported type.");
    supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
                                  reasonIfUnsupported,
                                  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
                                  "is not a supported type.");
}
if (descriptor.m_ProjectionEnabled)
{
    supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
                                  reasonIfUnsupported,
                                  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
                                  "is not a supported type.");
    if (paramsInfo.m_ProjectionBias != nullptr)
    {
        supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
                                      "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
                                      "are mismatched");
    }
}
if (descriptor.m_LayerNormEnabled)
{
    if (!descriptor.m_CifgEnabled)
    {
        supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
                                      reasonIfUnsupported,
                                      "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
                                      "is not a supported type.");
    }
    supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
                                  reasonIfUnsupported,
                                  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
                                  "is not a supported type.");
    supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
                                  reasonIfUnsupported,
                                  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
                                  "is not a supported type.");
    supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
                                  reasonIfUnsupported,
                                  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
                                  "is not a supported type.");
}
References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, armnn::QAsymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
The documentation for this class was generated from the following files:
RefLayerSupport.hpp
RefLayerSupport.cpp