ArmNN
 25.11
Loading...
Searching...
No Matches
RefLayerSupport Class Reference

#include <RefLayerSupport.hpp>

Inheritance diagram for RefLayerSupport:
[legend]
Collaboration diagram for RefLayerSupport:
[legend]

Public Member Functions

bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
 Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported.
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBroadcastToSupported (const TensorInfo &input, const TensorInfo &output, const BroadcastToDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReverseV2Supported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsScatterNdSupported (const TensorInfo &input, const TensorInfo &indices, const TensorInfo &updates, const TensorInfo &output, const ScatterNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo > > &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTileSupported (const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Public Member Functions inherited from LayerSupportBase
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const

Additional Inherited Members

Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
virtual ~ILayerSupport ()

Detailed Description

Definition at line 12 of file RefLayerSupport.hpp.

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo & input,
const TensorInfo & output,
const ActivationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 560 of file RefLayerSupport.cpp.

564{
565 bool supported = true;
566
567 // Define supported types.
568 std::array<DataType,6> supportedTypes = {
569 DataType::Float32,
570 DataType::Float16,
571 DataType::QAsymmS8,
572 DataType::QAsymmU8,
573 DataType::QSymmS16
574 };
575
576 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
577 "Reference activation: input type not supported.");
578
579 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
580 "Reference activation: output type not supported.");
581
582 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
583 "Reference activation: input and output types mismatched.");
584
585 supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
586 "Reference activation: input and output shapes are of different rank.");
587
588
589 struct ActivationFunctionSupported : public Rule
590 {
591 ActivationFunctionSupported(const ActivationDescriptor& desc)
592 {
593 switch(desc.m_Function)
594 {
595 case ActivationFunction::Abs:
596 case ActivationFunction::BoundedReLu:
597 case ActivationFunction::Elu:
598 case ActivationFunction::Gelu:
599 case ActivationFunction::HardSwish:
600 case ActivationFunction::LeakyReLu:
601 case ActivationFunction::Linear:
602 case ActivationFunction::ReLu:
603 case ActivationFunction::Sigmoid:
604 case ActivationFunction::SoftReLu:
605 case ActivationFunction::Sqrt:
606 case ActivationFunction::Square:
607 case ActivationFunction::TanH:
608 {
609 m_Res = true;
610 break;
611 }
612 default:
613 {
614 m_Res = false;
615 break;
616 }
617 }
618 }
619 };
620
621 // Function is supported
622 supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
623 "Reference activation: function not supported.");
624
625 return supported;
626}
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)

References armnn::Abs, armnn::BoundedReLu, armnn::CheckSupportRule(), armnn::Elu, armnn::Float16, armnn::Float32, armnn::Gelu, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.

Referenced by IsLayerSupported().

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 628 of file RefLayerSupport.cpp.

632{
633 bool supported = true;
634
635 std::array<DataType,7> supportedTypes = {
636 DataType::Float32,
637 DataType::Float16,
638 DataType::QAsymmS8,
639 DataType::QAsymmU8,
640 DataType::QSymmS16,
641 DataType::Signed32
642 };
643
644 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
645 "Reference addition: input 0 is not a supported type.");
646
647 supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
648 "Reference addition: input 1 is not a supported type.");
649
650 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
651 "Reference addition: output is not a supported type.");
652
653 supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
654 "Reference addition: input 0 and Input 1 types are mismatched");
655
656 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
657 "Reference addition: input and output types are mismatched");
658
659 supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
660 "Reference addition: shapes are not suitable for implicit broadcast.");
661
662 return supported;
663}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const ArgMinMaxDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 665 of file RefLayerSupport.cpp.

668{
669 IgnoreUnused(descriptor);
670
671 std::array<DataType, 8> supportedInputTypes =
672 {
673 DataType::Float16,
674 DataType::Float32,
675 DataType::QAsymmS8,
676 DataType::QAsymmU8,
677 DataType::QSymmS16,
678 DataType::Signed32,
679 DataType::Signed64
680 };
681
682 std::array<DataType,2> supportedOutputTypes = {
683 DataType::Signed32,
684 DataType::Signed64
685 };
686
687 bool supported = true;
688
689 supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
690 "Reference ArgMinMax: input is not a supported type.");
691 supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
692 "Reference ArgMinMax: output type not supported");
693
694 return supported;
695}
void IgnoreUnused(Ts &&...)

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and armnn::Signed64.

Referenced by IsLayerSupported().

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo & inputX,
const TensorInfo & inputY,
const TensorInfo & output,
const BatchMatMulDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 697 of file RefLayerSupport.cpp.

702{
703 IgnoreUnused(descriptor);
704
705 std::array<DataType, 6> supportedTypes =
706 {
707 DataType::Float16,
708 DataType::Float32,
709 DataType::QAsymmS8,
710 DataType::QAsymmU8,
711 DataType::QSymmS16
712 };
713
714 bool supported = true;
715
716 supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
717 "Reference batch matrix multiplication: input X is not a supported type");
718
719 supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
720 "Reference batch matrix multiplication: input Y is not a supported type");
721
722 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
723 "Reference batch matrix multiplication: output is not a supported type");
724
725 supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
726 "Reference batch matrix multiplication: input X and input Y types are mismatched");
727
728 supported &= CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
729 "Reference batch matrix multiplication: inputs and output types are mismatched");
730
731 supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputX, 2),
732 reasonIfUnsupported,
733 "Reference batch matrix multiplication: input X is not of rank 2 or greater");
734
735 supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputY, 2),
736 reasonIfUnsupported,
737 "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
738
739 return supported;
740}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & mean,
const TensorInfo & var,
const TensorInfo & beta,
const TensorInfo & gamma,
const BatchNormalizationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 742 of file RefLayerSupport.cpp.

750{
751 IgnoreUnused(descriptor);
752
753 std::array<DataType, 6> supportedTypes =
754 {
755 DataType::Float32,
756 DataType::Float16,
757 DataType::QAsymmS8,
758 DataType::QAsymmU8,
759 DataType::QSymmS16
760 };
761
762 bool supported = true;
763
764 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
765 "Reference batch normalization: input is not a supported type.");
766
767 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
768 "Reference batch normalization: output is not a supported type.");
769
770 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
771 "Reference batch normalization: input and output types are mismatched");
772
773 supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
774 "Reference batch normalization: mean is not a supported type.");
775
776 supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
777 "Reference batch normalization: variance is not a supported type.");
778
779 supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
780 "Reference batch normalization: beta is not a supported type.");
781
782 supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
783 "Reference batch normalization: gamma is not a supported type.");
784
785 return supported;
786}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const BatchToSpaceNdDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 788 of file RefLayerSupport.cpp.

792{
793 IgnoreUnused(descriptor);
794
795 bool supported = true;
796
797 std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
798 std::string inputTensorStr = "input";
799 std::string outputTensorStr = "output";
800
801 // Define supported types.
802 std::array<DataType,6> supportedTypes =
803 {
804 DataType::Float32,
805 DataType::Float16,
806 DataType::QAsymmS8,
807 DataType::QAsymmU8,
808 DataType::QSymmS16
809 };
810
811 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
812 "Reference BatchToSpaceNd: input type not supported.");
813
814 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
815 "Reference BatchToSpaceNd: output type not supported.");
816
817 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
818 "Reference BatchToSpaceNd: input and output types mismatched.");
819
820 return supported;
821}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsBroadcastToSupported()

bool IsBroadcastToSupported ( const TensorInfo & input,
const TensorInfo & output,
const BroadcastToDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 823 of file RefLayerSupport.cpp.

827{
828 IgnoreUnused(descriptor);
829
830 bool supported = true;
831
832 std::array<DataType, 8> supportedTypes
833 {
834 DataType::Float32,
835 DataType::Float16,
836 DataType::QAsymmS8,
837 DataType::QAsymmU8,
838 DataType::QSymmS8,
839 DataType::QSymmS16,
840 DataType::Signed32,
841 DataType::Signed64
842 };
843
844 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
845 "BroadcastTo: input type not supported.");
846
847 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
848 "BroadcastTo: output type not supported");
849
850 return supported;
851}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and armnn::Signed64.

Referenced by IsLayerSupported().

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 853 of file RefLayerSupport.cpp.

856{
857 std::array<DataType, 10> supportedInputTypes =
858 {
859 DataType::Float32,
860 DataType::Float16,
861 DataType::QSymmS8,
862 DataType::QAsymmS8,
863 DataType::QAsymmU8,
864 DataType::QSymmS16,
865 DataType::Signed32,
866 DataType::Signed64
867 };
868
869 bool supported = true;
870 supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
871 "Reference cast: input is not a supported type");
872
873
874 supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
875 "Reference cast: output is not a supported type");
876
877 supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
878 "Reference cast: input and output shapes have different number of total elements");
879
880 return supported;
881}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and armnn::Signed64.

Referenced by IsLayerSupported().

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo & input,
const TensorInfo & output,
const ChannelShuffleDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 883 of file RefLayerSupport.cpp.

887{
888 IgnoreUnused(descriptor);
889 bool supported = true;
890
891 // Define supported output and inputs types.
892 std::array<DataType, 7> supportedTypes =
893 {
894 DataType::Float32,
895 DataType::Float16,
896 DataType::QAsymmS8,
897 DataType::QAsymmU8,
898 DataType::QSymmS8,
899 DataType::QSymmS16
900 };
901
902 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
903 "Reference ChannelShuffle: input is not a supported type.");
904
905 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
906 "Reference ChannelShuffle: output is not a supported type.");
907
908 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
909 "Reference ChannelShuffle: input and output types are mismatched.");
910
911 return supported;
912}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by IsLayerSupported().

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const ComparisonDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 915 of file RefLayerSupport.cpp.

920{
921 IgnoreUnused(descriptor);
922 std::array<DataType, 8> supportedInputTypes =
923 {
924 DataType::Boolean,
925 DataType::Float32,
926 DataType::Float16,
927 DataType::QAsymmS8,
928 DataType::QAsymmU8,
929 DataType::QSymmS16,
930 DataType::Signed32
931 };
932
933 bool supported = true;
934 supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
935 "Reference comparison: input 0 is not a supported type");
936
937 supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
938 "Reference comparison: input 0 and Input 1 types are mismatched");
939
940 supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
941 "Reference comparison: output is not of type Boolean");
942
943 supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
944 "Reference comparison: shapes are not suitable for implicit broadcast.");
945
946 return supported;
947}

References armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo * > inputs,
const TensorInfo & output,
const OriginsDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 949 of file RefLayerSupport.cpp.

953{
954 IgnoreUnused(descriptor);
955
956 bool supported = true;
957 std::array<DataType,7> supportedTypes =
958 {
959 DataType::Float32,
960 DataType::Float16,
961 DataType::QAsymmS8,
962 DataType::QAsymmU8,
963 DataType::QSymmS16,
964 DataType::Signed32
965 };
966
967 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
968 "Reference concatenation: output type not supported");
969 for (const TensorInfo* input : inputs)
970 {
971 supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
972 "Reference concatenation: input type not supported");
973
974 supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
975 "Reference concatenation: input and output types mismatched.");
976 }
977
978 return supported;
979}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 981 of file RefLayerSupport.cpp.

983{
984 std::array<DataType, 8> supportedTypes =
985 {
986 DataType::Float16,
987 DataType::Float32,
988 DataType::QAsymmS8,
989 DataType::QAsymmU8,
990 DataType::QSymmS8,
991 DataType::QSymmS16,
992 DataType::Signed32,
993 DataType::Signed64
994 };
995
996 return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
997 "Reference constant: output is not a supported type.");
998}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and armnn::Signed64.

Referenced by IsLayerSupported().

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1000 of file RefLayerSupport.cpp.

1003{
1004 return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1005 input.GetDataType(),
1006 &TrueFunc<>,
1007 &FalseInputFuncF32<>,
1008 &FalseFuncU8<>,
1009 &FalseFuncI32<>,
1010 &FalseFuncU8<>) &&
1011 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1012 output.GetDataType(),
1013 &FalseOutputFuncF16<>,
1014 &TrueFunc<>,
1015 &FalseFuncU8<>,
1016 &FalseFuncI32<>,
1017 &FalseFuncU8<>));
1018}
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

References armnn::FalseFuncI32(), armnn::FalseFuncU8(), armnn::FalseInputFuncF32(), armnn::FalseOutputFuncF16(), TensorInfo::GetDataType(), armnn::IsSupportedForDataTypeGeneric(), and armnn::TrueFunc().

Referenced by IsLayerSupported().

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1020 of file RefLayerSupport.cpp.

1023{
1024 return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1025 input.GetDataType(),
1026 &FalseInputFuncF16<>,
1027 &TrueFunc<>,
1028 &FalseFuncU8<>,
1029 &FalseFuncI32<>,
1030 &FalseFuncU8<>) &&
1031 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1032 output.GetDataType(),
1033 &TrueFunc<>,
1034 &FalseOutputFuncF32<>,
1035 &FalseFuncU8<>,
1036 &FalseFuncI32<>,
1037 &FalseFuncU8<>));
1038}

References armnn::FalseFuncI32(), armnn::FalseFuncU8(), armnn::FalseInputFuncF16(), armnn::FalseOutputFuncF32(), TensorInfo::GetDataType(), armnn::IsSupportedForDataTypeGeneric(), and armnn::TrueFunc().

Referenced by IsLayerSupported().

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1040 of file RefLayerSupport.cpp.

1046{
1047 bool supported = true;
1048
1049 // Define supported types.
1050 std::array<DataType,7> supportedTypes =
1051 {
1052 DataType::Float32,
1053 DataType::Float16,
1054 DataType::QAsymmS8,
1055 DataType::QAsymmU8,
1056 DataType::QSymmS8,
1057 DataType::QSymmS16
1058 };
1059
1060 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1061 "Reference Convolution2d: input is not a supported type.");
1062
1063 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1064 "Reference Convolution2d: output is not a supported type.");
1065
1066 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1067 "Reference Convolution2d: input and output types mismatched.");
1068
1069
1070 const DataType inputType = input.GetDataType();
1071 if (IsQuantized8BitType(inputType))
1072 {
1073 std::array<DataType, 3> supportedWeightTypes =
1074 {
1075 DataType::QAsymmS8,
1076 DataType::QAsymmU8,
1077 DataType::QSymmS8
1078 };
1079
1080 supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1081 "Reference Convolution2d: weights type not supported for quantized input.");
1082 }
1083 else
1084 {
1085 supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1086 "Reference Convolution2d: weights is not a supported type.");
1087
1088 supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1089 "Reference Convolution2d: input and weights types mismatched.");
1090 }
1091
1092 if (biases.has_value())
1093 {
1094 std::array<DataType,4> biasesSupportedTypes =
1095 {
1096 DataType::Float32,
1097 DataType::Float16,
1098 DataType::Signed32
1099 };
1100
1101 supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1102 "Reference Convolution2d: biases is not a supported type.");
1103 }
1104 IgnoreUnused(descriptor);
1105
1106 return supported;
1107}
constexpr bool IsQuantized8BitType(DataType dataType)
DataType
Definition Types.hpp:49

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< IsReference, T >::value().

Referenced by IsLayerSupported().

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution3dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1109 of file RefLayerSupport.cpp.

1115{
1116 bool supported = true;
1117
1118 // Define supported types.
1119 std::array<DataType,7> supportedTypes =
1120 {
1121 DataType::Float32,
1122 DataType::Float16,
1123 DataType::QAsymmS8,
1124 DataType::QAsymmU8,
1125 DataType::QSymmS8,
1126 DataType::QSymmS16
1127 };
1128
1129 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1130 "Reference Convolution3d: input is not a supported type.");
1131
1132 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1133 "Reference Convolution3d: output is not a supported type.");
1134
1135 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1136 "Reference Convolution3d: input and output types mismatched.");
1137
1138 const DataType inputType = input.GetDataType();
1139 if (IsQuantized8BitType(inputType))
1140 {
1141 std::array<DataType, 3> supportedWeightTypes =
1142 {
1143 DataType::QAsymmS8,
1144 DataType::QAsymmU8,
1145 DataType::QSymmS8
1146 };
1147
1148 supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1149 "Reference Convolution3d: weights type not supported for quantized input.");
1150 }
1151 else
1152 {
1153 supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1154 "Reference Convolution3d: weights is not a supported type.");
1155
1156 supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1157 "Reference Convolution3d: input and weights types mismatched.");
1158 }
1159
1160 if (biases.has_value())
1161 {
1162 std::array<DataType,4> biasesSupportedTypes =
1163 {
1164 DataType::Float32,
1165 DataType::Float16,
1166 DataType::Signed32
1167 };
1168
1169 supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1170 "Reference Convolution3d: biases is not a supported type.");
1171 }
1172 IgnoreUnused(descriptor);
1173
1174 return supported;
1175}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< IsReference, T >::value().

Referenced by IsLayerSupported().

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1177 of file RefLayerSupport.cpp.

1180{
1181 bool supported = true;
1182
1183 std::array<DataType, 8> supportedTypes =
1184 {
1185 DataType::BFloat16,
1186 DataType::Float16,
1187 DataType::Float32,
1188 DataType::QAsymmS8,
1189 DataType::QAsymmU8,
1190 DataType::QSymmS8,
1191 DataType::QSymmS16,
1192 DataType::Signed32
1193 };
1194
1195 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1196 "Reference for Debug layer: input type not supported");
1197
1198 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1199 "Reference for Debug layer: output type not supported");
1200
1201 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1202 "Reference for Debug layer: input and output types are mismatched");
1203
1204 return supported;
1205}

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthToSpaceDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1207 of file RefLayerSupport.cpp.

1211{
1212 IgnoreUnused(descriptor);
1213 bool supported = true;
1214
1215 std::array<DataType,6> supportedTypes =
1216 {
1217 DataType::Float32,
1218 DataType::Float16,
1219 DataType::QAsymmS8,
1220 DataType::QAsymmU8,
1221 DataType::QSymmS16
1222 };
1223
1224 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1225 "Reference DepthToSpace: input type not supported");
1226
1227 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1228 "Reference DepthToSpace: output type not supported");
1229
1230 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1231 "Reference DepthToSpace: input and output types are mismatched");
1232
1233 return supported;
1234}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1236 of file RefLayerSupport.cpp.

1242{
1243 IgnoreUnused(descriptor);
1244 bool supported = true;
1245
1246 // Define supported types.
1247 std::array<DataType,7> supportedTypes =
1248 {
1249 DataType::Float32,
1250 DataType::Float16,
1251 DataType::QAsymmS8,
1252 DataType::QAsymmU8,
1253 DataType::QSymmS8,
1254 DataType::QSymmS16
1255 };
1256
1257 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1258 "Reference DepthwiseConvolution2d: input is not a supported type.");
1259
1260 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1261 "Reference DepthwiseConvolution2d: output is not a supported type.");
1262
1263 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1264 "Reference DepthwiseConvolution2d: input and output types mismatched.");
1265
1266 const DataType inputType = input.GetDataType();
1267 if (IsQuantized8BitType(inputType))
1268 {
1269 std::array<DataType, 3> supportedWeightTypes =
1270 {
1271 DataType::QAsymmS8,
1272 DataType::QAsymmU8,
1273 DataType::QSymmS8,
1274 };
1275
1276 supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1277 "Reference DepthwiseConvolution2d: weights type not supported for "
1278 "quantized input.");
1279 }
1280 else
1281 {
1282 supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1283 "Reference DepthwiseConvolution2d: weights is not a supported type.");
1284
1285 supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1286 "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1287 }
1288
1289 if (biases.has_value())
1290 {
1291 std::array<DataType,4> biasesSupportedTypes =
1292 {
1293 DataType::Float32,
1294 DataType::Float16,
1295 DataType::Signed32
1296 };
1297 supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1298 "Reference DepthwiseConvolution2d: biases is not a supported type.");
1299 }
1300
1301 return supported;
1302
1303}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< IsReference, T >::value().

Referenced by IsDilatedDepthwiseConvolutionSupported(), and IsLayerSupported().

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1305 of file RefLayerSupport.cpp.

1308{
1309 bool supported = true;
1310
1311 std::array<DataType,5> supportedInputTypes = {
1312 DataType::QAsymmS8,
1313 DataType::QAsymmU8,
1314 DataType::QSymmS8,
1315 DataType::QSymmS16,
1316 DataType::Float16
1317 };
1318
1319 supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1320 "Reference for Dequantize layer: input type not supported.");
1321
1322 supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1323 "Reference for Dequantize layer: per-axis quantized input not supported.");
1324
1325 std::array<DataType,3> supportedOutputTypes = {
1326 DataType::Float32,
1327 DataType::Float16
1328 };
1329
1330 supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1331 "Reference for Dequantize layer: output type not supported.");
1332
1333 supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1334 "Reference for Dequantize layer: input/output shapes have different num total "
1335 "elements.");
1336
1337 return supported;
1338}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by IsLayerSupported().

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo & boxEncodings,
const TensorInfo & scores,
const TensorInfo & anchors,
const TensorInfo & detectionBoxes,
const TensorInfo & detectionClasses,
const TensorInfo & detectionScores,
const TensorInfo & numDetections,
const DetectionPostProcessDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1340 of file RefLayerSupport.cpp.

1349{
1350 IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
1351
1352 bool supported = true;
1353
1354 std::array<DataType,6> supportedInputTypes =
1355 {
1356 DataType::Float32,
1357 DataType::Float16,
1358 DataType::QAsymmS8,
1359 DataType::QAsymmU8,
1360 DataType::QSymmS16
1361 };
1362
1363 supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1364 "Reference DetectionPostProcess: input 0 is not a supported type.");
1365
1366 supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1367 "Reference DetectionPostProcess: input 1 is not a supported type.");
1368
1369 return supported;
1370}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1372 of file RefLayerSupport.cpp.

1378{
1379 return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
1380}

References IsDepthwiseConvolutionSupported().

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1382 of file RefLayerSupport.cpp.

1386{
1387 bool supported = true;
1388
1389 std::array<DataType,7> supportedTypes = {
1390 DataType::Float32,
1391 DataType::Float16,
1392 DataType::QAsymmS8,
1393 DataType::QAsymmU8,
1394 DataType::QSymmS16,
1395 DataType::Signed32
1396 };
1397
1398 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1399 "Reference division: input 0 is not a supported type.");
1400
1401 supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1402 "Reference division: input 1 is not a supported type.");
1403
1404 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1405 "Reference division: output is not a supported type.");
1406
1407 supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1408 "Reference division: input 0 and Input 1 types are mismatched");
1409
1410 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1411 "Reference division: input and output types are mismatched");
1412
1413 supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1414 "Reference division: shapes are not suitable for implicit broadcast.");
1415
1416 return supported;
1417}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo & input,
const TensorInfo & output,
const ElementwiseUnaryDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1419 of file RefLayerSupport.cpp.

1423{
1424 IgnoreUnused(descriptor);
1425
1426 std::array<DataType, 7> supportedTypes =
1427 {
1428 DataType::Float32,
1429 DataType::Float16,
1430 DataType::QAsymmS8,
1431 DataType::QAsymmU8,
1432 DataType::QSymmS16,
1433 DataType::Signed32
1434 };
1435
1436 std::array<DataType, 1> logicalSupportedTypes =
1437 {
1438 DataType::Boolean
1439 };
1440
1441 bool supported = true;
1442
1443 if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1444 {
1445 supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1446 "Reference elementwise unary: input type not supported");
1447
1448 supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1449 "Reference elementwise unary: output type not supported");
1450 }
1451 else
1452 {
1453 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1454 "Reference elementwise unary: input type not supported");
1455
1456 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1457 "Reference elementwise unary: output type not supported");
1458 }
1459
1460 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1461 "Reference elementwise unary: input and output types not matching");
1462
1463 supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1464 "Reference elementwise unary: input and output shapes"
1465 "have different number of total elements");
1466
1467 return supported;
1468}

References armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo & input,
const FakeQuantizationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1470 of file RefLayerSupport.cpp.

1473{
1474 IgnoreUnused(descriptor);
1475 bool supported = true;
1476
1477 std::array<DataType,1> supportedTypes =
1478 {
1479 DataType::Float32
1480 };
1481
1482 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1483 "Reference fake quantization: input type not supported.");
1484
1485 return supported;
1486}

References armnn::CheckSupportRule(), armnn::Float32, and armnn::IgnoreUnused().

Referenced by IsLayerSupported().

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo & input,
const TensorInfo & output,
const FillDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1488 of file RefLayerSupport.cpp.

1492{
1493 IgnoreUnused(descriptor);
1494 IgnoreUnused(output);
1495
1496 bool supported = true;
1497
1498 std::array<DataType,3> supportedTypes =
1499 {
1500 DataType::Float32,
1501 DataType::Float16,
1502 DataType::Signed32
1503 };
1504
1505 supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1506 "Reference Fill: input type not supported.");
1507
1508 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1509 "Reference Fill: output type not supported.");
1510 return supported;
1511}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1513 of file RefLayerSupport.cpp.

1516{
1517 bool supported = true;
1518
1519 std::array<DataType,3> supportedTypes =
1520 {
1521 DataType::Float32,
1522 DataType::Float16
1523 };
1524
1525 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1526 "Reference Floor: input type not supported.");
1527
1528 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1529 "Reference Floor: output type not supported.");
1530
1531 return supported;
1532}

References armnn::CheckSupportRule(), armnn::Float16, and armnn::Float32.

Referenced by IsLayerSupported().

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & weights,
const TensorInfo & biases,
const FullyConnectedDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1534 of file RefLayerSupport.cpp.

1540{
1541 bool supported = true;
1542
1543 // Define supported types.
1544 std::array<DataType,6> supportedTypes =
1545 {
1546 DataType::Float32,
1547 DataType::Float16,
1548 DataType::QAsymmS8,
1549 DataType::QAsymmU8,
1550 DataType::QSymmS8,
1551 DataType::QSymmS16
1552 };
1553
1554 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1555 "Reference Fully Connected: input type not supported.");
1556
1557 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1558 "Reference Fully Connected: output type not supported.");
1559
1560 supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1561 "Reference Fully Connected: weights type not supported.");
1562
1563 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1564 "Reference Fully Connected: input and output types mismatched.");
1565
1566 supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1567 "Reference Fully Connected: weights is not a supported type.");
1568
1569 supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1570 "Reference Fully Connected: input and weights types mismatched.");
1571
1572 if (descriptor.m_BiasEnabled)
1573 {
1574 // Defined supported types for bias
1575 std::array<DataType, 5>
1576 supportedBiasTypes =
1577 {
1578 DataType::Float32,
1579 DataType::Float16,
1580 DataType::Signed32,
1581 DataType::QAsymmS8
1582 };
1583
1584 supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1585 "Reference Fully Connected: bias type not supported.");
1586
1587 supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1588 "Reference Fully Connected: bias and weight types mismatch.");
1589
1590 supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1591 "Reference Fully Connected: bias type inferred from weights is incompatible.");
1592
1593 supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1594 "Reference Fully Connected: bias must have 1 dimension.");
1595
1596 }
1597
1598 return supported;
1599}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, FullyConnectedDescriptor::m_BiasEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1601 of file RefLayerSupport.cpp.

1605{
1606 bool supported = true;
1607 std::array<DataType,7> supportedTypes =
1608 {
1609 DataType::Float32,
1610 DataType::Float16,
1611 DataType::QAsymmS8,
1612 DataType::QAsymmU8,
1613 DataType::QSymmS16,
1614 DataType::Signed32
1615 };
1616
1617 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1618 "Reference GatherNd: input type not supported");
1619
1620 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1621 "Reference GatherNd: output type not supported");
1622
1623 supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1624 "Reference GatherNd: indices (input1) type not supported");
1625
1626 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1627 "Reference GatherNd: input and output types not matching");
1628
1629 return supported;
1630}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const GatherDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1632 of file RefLayerSupport.cpp.

1637{
1638 bool supported = true;
1639 std::array<DataType,7> supportedTypes =
1640 {
1641 DataType::Float32,
1642 DataType::Float16,
1643 DataType::QAsymmS8,
1644 DataType::QAsymmU8,
1645 DataType::QSymmS16,
1646 DataType::Signed32,
1647 DataType::Signed64
1648 };
1649
1650 IgnoreUnused(descriptor);
1651 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1652 "Reference Gather: input type not supported");
1653
1654 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1655 "Reference Gather: output type not supported");
1656
1657 supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1658 "Reference Gather: indices (input1) type not supported");
1659
1660 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1661 "Reference Gather: input and output types not matching");
1662
1663 return supported;
1664}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and armnn::Signed64.

Referenced by IsLayerSupported().

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo & input,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1666 of file RefLayerSupport.cpp.

1668{
1669 return true;
1670}

Referenced by IsLayerSupported().

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const InstanceNormalizationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1672 of file RefLayerSupport.cpp.

1676{
1677 IgnoreUnused(descriptor);
1678 // Define supported types
1679 std::array<DataType, 3> supportedTypes =
1680 {
1681 DataType::Float32,
1682 DataType::Float16
1683 };
1684
1685 bool supported = true;
1686
1687 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1688 "Reference Instance Normalization: input type not supported.");
1689
1690 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1691 "Reference Instance Normalization: output type not supported.");
1692
1693 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1694 "Reference Instance Normalization: input and output types mismatched.");
1695
1696 supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1697 "Reference Instance Normalization: input and output shapes have different "
1698 "num total elements.");
1699
1700 return supported;
1701}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by IsLayerSupported().

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const L2NormalizationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1703 of file RefLayerSupport.cpp.

1707{
1708 IgnoreUnused(descriptor);
1709 // Define supported types
1710 std::array<DataType, 6> supportedTypes =
1711 {
1712 DataType::Float32,
1713 DataType::Float16,
1714 DataType::QAsymmS8,
1715 DataType::QAsymmU8,
1716 DataType::QSymmS16
1717 };
1718
1719 bool supported = true;
1720
1721 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1722 "Reference L2normalization: input type not supported.");
1723
1724 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1725 "Reference L2normalization: output type not supported.");
1726
1727 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1728 "Reference L2normalization: input and output types mismatched.");
1729
1730 supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1731 "Reference L2normalization: input and output shapes have different "
1732 "num total elements.");
1733
1734 return supported;
1735}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType & type,
const std::vector< TensorInfo > & infos,
const BaseDescriptor & descriptor,
const Optional< LstmInputParamsInfo > & lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > & quantizedLstmParamsInfo,
Optional< std::string & > reasonIfUnsupported ) const
overridevirtual

Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported.

Reimplemented from LayerSupportBase.

Definition at line 61 of file RefLayerSupport.cpp.

67{
68 switch (type)
69 {
70 case LayerType::Activation:
71 return IsActivationSupported(infos[0],
72 infos[1],
73 *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
74 reasonIfUnsupported);
75 case LayerType::Addition:
76 return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
77 case LayerType::ArgMinMax:
78 return IsArgMinMaxSupported(infos[0],
79 infos[1],
80 *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
81 reasonIfUnsupported);
82 case LayerType::BatchMatMul:
83 return IsBatchMatMulSupported(infos[0],
84 infos[1],
85 infos[2],
86 *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
87 reasonIfUnsupported);
88 case LayerType::BatchNormalization:
89 return IsBatchNormalizationSupported(infos[0],
90 infos[1],
91 infos[2],
92 infos[3],
93 infos[4],
94 infos[5],
95 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
96 (&descriptor)),
97 reasonIfUnsupported);
98 case LayerType::BatchToSpaceNd:
99 return IsBatchToSpaceNdSupported(infos[0],
100 infos[1],
101 *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
102 reasonIfUnsupported);
103 case LayerType::BroadcastTo:
104 return IsBroadcastToSupported(infos[0],
105 infos[1],
106 *(PolymorphicDowncast<const BroadcastToDescriptor*>(&descriptor)),
107 reasonIfUnsupported);
108 case LayerType::Comparison:
109 return IsComparisonSupported(infos[0],
110 infos[1],
111 infos[2],
112 *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
113 reasonIfUnsupported);
114 case LayerType::Concat:
115 {
116 std::vector<const TensorInfo*> inputInfos;
117 for (uint32_t i = 0; i < (infos.size() - 1); i++)
118 {
119 inputInfos.push_back(&infos[i]);
120 }
121 return IsConcatSupported(inputInfos,
122 infos[infos.size() - 1],
123 *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
124 reasonIfUnsupported);
125 }
126 case LayerType::Constant:
127 return IsConstantSupported(infos[0], reasonIfUnsupported);
128 case LayerType::ConvertFp16ToFp32:
129 return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
130 case LayerType::ConvertFp32ToFp16:
131 return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
132 case LayerType::Convolution2d:
133 {
134 if (infos.size() != 4)
135 {
136 throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
137 "TensorInfos should be of format: {input, output, weights, biases}.");
138 }
139
140 auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
141 if (infos[3] == TensorInfo())
142 {
143 return IsConvolution2dSupported(infos[0],
144 infos[1],
145 desc,
146 infos[2],
147 EmptyOptional(),
148 reasonIfUnsupported);
149 }
150 else
151 {
152 return IsConvolution2dSupported(infos[0],
153 infos[1],
154 desc,
155 infos[2],
156 infos[3],
157 reasonIfUnsupported);
158 }
159 }
160 case LayerType::DepthToSpace:
161 return IsDepthToSpaceSupported(infos[0],
162 infos[1],
163 *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
164 reasonIfUnsupported);
165 case LayerType::DepthwiseConvolution2d:
166 {
167 if (infos.size() != 4)
168 {
169 throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
170 "TensorInfos should be of format: {input, output, weights, biases}.");
171 }
172
173 auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
174 if (infos[3] == TensorInfo())
175 {
176 return IsDepthwiseConvolutionSupported(infos[0],
177 infos[1],
178 desc,
179 infos[2],
180 EmptyOptional(),
181 reasonIfUnsupported);
182 }
183 else
184 {
185 return IsDepthwiseConvolutionSupported(infos[0],
186 infos[1],
187 desc,
188 infos[2],
189 infos[3],
190 reasonIfUnsupported);
191 }
192 }
193 case LayerType::Dequantize:
194 return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
195 case LayerType::Division:
196 return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
197 case LayerType::ElementwiseBinary:
198 {
199 std::array<DataType, 7> supportedTypes =
200 {
201 DataType::Float32,
202 DataType::Float16,
203 DataType::QAsymmS8,
204 DataType::QAsymmU8,
205 DataType::QSymmS16,
206 DataType::Signed32
207 };
208
209 bool supported = true;
210 supported &= CheckSupportRule(TypeAnyOf(infos[0], supportedTypes), reasonIfUnsupported,
211 "Reference elementwise unary: input type not supported");
212
213 supported &= CheckSupportRule(TypeAnyOf(infos[1], supportedTypes), reasonIfUnsupported,
214 "Reference elementwise unary: input type not supported");
215
216 supported &= CheckSupportRule(TypeAnyOf(infos[2], supportedTypes), reasonIfUnsupported,
217 "Reference elementwise unary: output type not supported");
218
219 supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[1]), reasonIfUnsupported,
220 "Reference elementwise unary: input types not matching");
221
222 supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[2]), reasonIfUnsupported,
223 "Reference elementwise unary: input and output types not matching");
224
225 return supported;
226 }
227 case LayerType::ElementwiseUnary:
228 return IsElementwiseUnarySupported(infos[0],
229 infos[1],
230 *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
231 reasonIfUnsupported);
232 case LayerType::Fill:
233 return IsFillSupported(infos[0],
234 infos[1],
235 *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
236 reasonIfUnsupported);
237 case LayerType::Floor:
238 return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
239 case LayerType::FullyConnected:
240 return IsFullyConnectedSupported(infos[0],
241 infos[1],
242 infos[2],
243 infos[3],
244 *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
245 reasonIfUnsupported);
246 case LayerType::Gather:
247 return IsGatherSupported(infos[0],
248 infos[1],
249 infos[2],
250 *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
251 reasonIfUnsupported);
252 case LayerType::GatherNd:
253 return IsGatherNdSupported(infos[0],
254 infos[1],
255 infos[2],
256 reasonIfUnsupported);
257 case LayerType::Input:
258 return IsInputSupported(infos[0], reasonIfUnsupported);
259 case LayerType::InstanceNormalization:
260 return IsInstanceNormalizationSupported(infos[0],
261 infos[1],
262 *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
263 (&descriptor)),
264 reasonIfUnsupported);
265 case LayerType::L2Normalization:
266 return IsL2NormalizationSupported(infos[0],
267 infos[1],
268 *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
269 reasonIfUnsupported);
270 case LayerType::LogicalBinary:
271 return IsLogicalBinarySupported(infos[0],
272 infos[1],
273 infos[2],
274 *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
275 reasonIfUnsupported);
276 case LayerType::LogSoftmax:
277 return IsLogSoftmaxSupported(infos[0],
278 infos[1],
279 *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
280 reasonIfUnsupported);
281 case LayerType::Lstm:
282 return IsLstmSupported(infos[0],
283 infos[1],
284 infos[2],
285 infos[3],
286 infos[4],
287 infos[5],
288 infos[6],
289 *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
290 lstmParamsInfo.value(),
291 reasonIfUnsupported);
292 case LayerType::QLstm:
293 return IsQLstmSupported(infos[0],
294 infos[1],
295 infos[2],
296 infos[3],
297 infos[4],
298 infos[5],
299 *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
300 lstmParamsInfo.value(),
301 reasonIfUnsupported);
302 case LayerType::Maximum:
303 return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
304 case LayerType::Mean:
305 return IsMeanSupported(infos[0],
306 infos[1],
307 *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
308 reasonIfUnsupported);
309 case LayerType::Minimum:
310 return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
311 case LayerType::Multiplication:
312 return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
313 case LayerType::Normalization:
314 return IsNormalizationSupported(infos[0],
315 infos[1],
316 *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
317 reasonIfUnsupported);
318 case LayerType::Output:
319 return IsOutputSupported(infos[0], reasonIfUnsupported);
320 case LayerType::Pad:
321 return IsPadSupported(infos[0],
322 infos[1],
323 *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
324 reasonIfUnsupported);
325 case LayerType::Permute:
326 return IsPermuteSupported(infos[0],
327 infos[1],
328 *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
329 reasonIfUnsupported);
330 case LayerType::Pooling2d:
331 return IsPooling2dSupported(infos[0],
332 infos[1],
333 *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
334 reasonIfUnsupported);
335 case LayerType::Prelu:
336 return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
337 case LayerType::Quantize:
338 return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
339 case LayerType::Reshape:
340 return IsReshapeSupported(infos[0],
341 infos[1],
342 *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
343 reasonIfUnsupported);
344 case LayerType::Resize:
345 return IsResizeSupported(infos[0],
346 infos[1],
347 *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
348 reasonIfUnsupported);
349 case LayerType::ReverseV2:
350 return IsReverseV2Supported(infos[0],
351 infos[1],
352 infos[2],
353 reasonIfUnsupported);
354 case LayerType::Reduce:
355 return IsReduceSupported(infos[0],
356 infos[1],
357 *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
358 reasonIfUnsupported);
359 case LayerType::ScatterNd:
360 return IsScatterNdSupported(infos[0],
361 infos[1],
362 infos[2],
363 infos[3],
364 *(PolymorphicDowncast<const ScatterNdDescriptor*>(&descriptor)),
365 reasonIfUnsupported);
366 case LayerType::Slice:
367 return IsSliceSupported(infos[0],
368 infos[1],
369 *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
370 reasonIfUnsupported);
371 case LayerType::Softmax:
372 return IsSoftmaxSupported(infos[0],
373 infos[1],
374 *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
375 reasonIfUnsupported);
376 case LayerType::SpaceToBatchNd:
377 return IsSpaceToBatchNdSupported(infos[0],
378 infos[1],
379 *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
380 reasonIfUnsupported);
381 case LayerType::SpaceToDepth:
382 return IsSpaceToDepthSupported(infos[0],
383 infos[1],
384 *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
385 reasonIfUnsupported);
386 case LayerType::Splitter:
387 {
388 std::vector<TensorInfo> outputInfos;
389 for (uint32_t i = 1; i < infos.size(); i++)
390 {
391 outputInfos.push_back(infos[i]);
392 }
393 return IsSplitterSupported(infos[0],
394 {outputInfos.begin(), outputInfos.end()},
395 *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
396 reasonIfUnsupported);
397 }
398 case LayerType::Stack:
399 {
400 std::vector<const TensorInfo*> inputInfos;
401 for (uint32_t i = 0; i < infos.size() - 1; i++)
402 {
403 inputInfos.push_back(&infos[i]);
404 }
405 return IsStackSupported(inputInfos,
406 infos[infos.size() - 1],
407 *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
408 reasonIfUnsupported);
409 }
410 case LayerType::StridedSlice:
411 return IsStridedSliceSupported(infos[0],
412 infos[1],
413 *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
414 reasonIfUnsupported);
415 case LayerType::Subtraction:
416 return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
417 case LayerType::Tile:
418 return IsTileSupported(infos[0],
419 infos[1],
420 *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
421 reasonIfUnsupported);
422 case LayerType::Transpose:
423 return IsTransposeSupported(infos[0],
424 infos[1],
425 *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
426 reasonIfUnsupported);
427 case LayerType::TransposeConvolution2d:
428 {
429 if (infos.size() != 4)
430 {
431 throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
432 "TensorInfos should be of format: {input, output, weights, biases}.");
433 }
434
435 auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
436 if (infos[3] == TensorInfo())
437 {
438 return IsTransposeConvolution2dSupported(infos[0],
439 infos[1],
440 desc,
441 infos[2],
442 EmptyOptional(),
443 reasonIfUnsupported);
444 }
445 else
446 {
447 return IsTransposeConvolution2dSupported(infos[0],
448 infos[1],
449 desc,
450 infos[2],
451 infos[3],
452 reasonIfUnsupported);
453 }
454 }
455 case LayerType::Cast:
456 return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
457 case LayerType::ChannelShuffle:
458 return IsChannelShuffleSupported(infos[0],
459 infos[1],
460 *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
461 reasonIfUnsupported);
462 case LayerType::Convolution3d:
463 {
464 if (infos.size() != 4)
465 {
466 throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
467 "TensorInfos should be of format: {input, output, weights, biases}.");
468 }
469
470 auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
471 if (infos[3] == TensorInfo())
472 {
473 return IsConvolution3dSupported(infos[0],
474 infos[1],
475 desc,
476 infos[2],
477 EmptyOptional(),
478 reasonIfUnsupported);
479 }
480 else
481 {
482 return IsConvolution3dSupported(infos[0],
483 infos[1],
484 desc,
485 infos[2],
486 infos[3],
487 reasonIfUnsupported);
488 }
489 }
490 case LayerType::Debug:
491 return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
492 case LayerType::DetectionPostProcess:
493 return IsDetectionPostProcessSupported(infos[0],
494 infos[1],
495 infos[2],
496 infos[3],
497 infos[4],
498 infos[5],
499 infos[6],
500 *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
501 (&descriptor)),
502 reasonIfUnsupported);
503 case LayerType::FakeQuantization:
504 return IsFakeQuantizationSupported(infos[0],
505 *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
506 reasonIfUnsupported);
507 case LayerType::MemCopy:
508 return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
509 case LayerType::Rank:
510 return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
511 case LayerType::Shape:
512 return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
513 case LayerType::UnidirectionalSequenceLstm:
514 {
515 if (infos.size() != 6)
516 {
517 throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
518 "should be of format: {input, outputStateIn, cellStateIn, "
519 "hiddenStateOutputVal, cellStateOutputVal, output}");
520 }
521 auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
522 return IsUnidirectionalSequenceLstmSupported(infos[0],
523 infos[1],
524 infos[2],
525 infos[3],
526 infos[4],
527 infos[5],
528 desc,
529 lstmParamsInfo.value(),
530 reasonIfUnsupported);
531 }
532 case LayerType::Pooling3d:
533 return IsPooling3dSupported(infos[0],
534 infos[1],
535 *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
536 reasonIfUnsupported);
537 case LayerType::Map:
538 return true;
539 case LayerType::Unmap:
540 return true;
541 case LayerType::MemImport:
542 return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
543 case LayerType::Merge:
544 return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
545 case LayerType::QuantizedLstm:
546 return LayerSupportBase::IsQuantizedLstmSupported(infos[0],
547 infos[1],
548 infos[2],
549 infos[3],
550 infos[4],
551 quantizedLstmInputParamsInfo.value(),
552 reasonIfUnsupported);
553 default:
554 // layers not supported in reference by default:
555 // precompiled, standin, switch, fused
556 return false;
557 }
558}

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::BroadcastTo, armnn::Cast, armnn::ChannelShuffle, armnn::CheckSupportRule(), armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Float16, armnn::Float32, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::Input, armnn::InstanceNormalization, IsActivationSupported(), IsAdditionSupported(), IsArgMinMaxSupported(), IsBatchMatMulSupported(), IsBatchNormalizationSupported(), IsBatchToSpaceNdSupported(), IsBroadcastToSupported(), IsCastSupported(), IsChannelShuffleSupported(), IsComparisonSupported(), IsConcatSupported(), IsConstantSupported(), IsConvertFp16ToFp32Supported(), IsConvertFp32ToFp16Supported(), IsConvolution2dSupported(), IsConvolution3dSupported(), IsDebugSupported(), IsDepthToSpaceSupported(), IsDepthwiseConvolutionSupported(), IsDequantizeSupported(), IsDetectionPostProcessSupported(), IsDivisionSupported(), IsElementwiseUnarySupported(), IsFakeQuantizationSupported(), IsFillSupported(), IsFloorSupported(), IsFullyConnectedSupported(), IsGatherNdSupported(), IsGatherSupported(), IsInputSupported(), IsInstanceNormalizationSupported(), IsL2NormalizationSupported(), IsLogicalBinarySupported(), IsLogSoftmaxSupported(), IsLstmSupported(), IsMaximumSupported(), IsMeanSupported(), IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), IsMinimumSupported(), IsMultiplicationSupported(), IsNormalizationSupported(), IsOutputSupported(), IsPadSupported(), IsPermuteSupported(), IsPooling2dSupported(), IsPooling3dSupported(), 
IsPreluSupported(), IsQLstmSupported(), LayerSupportBase::IsQuantizedLstmSupported(), IsQuantizeSupported(), IsRankSupported(), IsReduceSupported(), IsReshapeSupported(), IsResizeSupported(), IsReverseV2Supported(), IsScatterNdSupported(), IsShapeSupported(), IsSliceSupported(), IsSoftmaxSupported(), IsSpaceToBatchNdSupported(), IsSpaceToDepthSupported(), IsSplitterSupported(), IsStackSupported(), IsStridedSliceSupported(), IsSubtractionSupported(), IsTileSupported(), IsTransposeConvolution2dSupported(), IsTransposeSupported(), IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::PolymorphicDowncast(), armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QLstm, armnn::QSymmS16, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::ReverseV2, armnn::ScatterNd, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Tile, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< IsReference, T >::value().

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const LogicalBinaryDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported ) const

Definition at line 1737 of file RefLayerSupport.cpp.

1742{
1743 IgnoreUnused(descriptor);
1744
1745 std::array<DataType, 1> supportedTypes =
1746 {
1747 DataType::Boolean
1748 };
1749
1750 bool supported = true;
1751 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1752 "Reference LogicalBinary: input 0 type not supported");
1753 supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1754 "Reference LogicalBinary: input 1 type not supported");
1755
1756 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1757 "Reference LogicalBinary: input and output types do not match");
1758
1759 supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1760 "Reference LogicalBinary: shapes are not suitable for implicit broadcast.");
1761
1762 return supported;
1763}

References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().

Referenced by IsLayerSupported().

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const LogSoftmaxDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported ) const

Definition at line 1765 of file RefLayerSupport.cpp.

1769{
1770 IgnoreUnused(descriptor);
1771
1772 std::array<DataType, 4> supportedTypes =
1773 {
1774 DataType::Float32,
1775 DataType::Float16,
1776 DataType::QAsymmS8,
1777 DataType::QAsymmU8
1778 };
1779
1780 bool supported = true;
1781 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1782 "Reference LogSoftmax: input type not supported");
1783
1784 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1785 "Reference LogSoftmax: output type not supported");
1786
1787 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1788 "Reference LogSoftmax: input and output types do not match");
1789
1790 return supported;
1791}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, and armnn::QAsymmU8.

Referenced by IsLayerSupported().

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo & input,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateIn,
const TensorInfo & scratchBuffer,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const LstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1793 of file RefLayerSupport.cpp.

1803{
1804 IgnoreUnused(descriptor);
1805 IgnoreUnused(paramsInfo);
1806
1807 bool supported = true;
1808
1809 std::array<DataType,3> supportedTypes = {
1810 DataType::Float32,
1811 DataType::QSymmS16
1812 };
1813
1814 // check inputs and outputs
1815 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1816 "Reference Lstm: input is not a supported type.");
1817 supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1818 "Reference Lstm: input and outputStateIn types are mismatched");
1819 supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1820 "Reference Lstm: input and cellStateIn types are mismatched");
1821 supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1822 "Reference Lstm: input and scratchBuffer types are mismatched");
1823 supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1824 "Reference Lstm: input and outputStateOut types are mismatched");
1825 supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1826 "Reference Lstm: input and cellStateOut types are mismatched");
1827
1828 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1829 "Reference Lstm: input and output types are mismatched");
1830 // check layer parameters
1831 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1832 "Reference Lstm: input and InputToForgetWeights types are mismatched");
1833 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1834 "Reference Lstm: input and InputToCellWeights types are mismatched");
1835 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1836 "Reference Lstm: input and InputToOutputWeights types are mismatched");
1837 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1838 "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1839 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1840 "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1841 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1842 "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1843 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1844 "Reference Lstm: input and ForgetGateBias types are mismatched");
1845 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1846 "Reference Lstm: input and CellBias types are mismatched");
1847 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1848 "Reference Lstm: input and OutputGateBias types are mismatched");
1849 if (!descriptor.m_CifgEnabled)
1850 {
1851 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1852 "Reference Lstm: input and InputToInputWeights types are mismatched");
1853 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1854 reasonIfUnsupported,
1855 "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1856 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1857 "Reference Lstm: input and InputGateBias types are mismatched");
1858 if (descriptor.m_PeepholeEnabled)
1859 {
1860 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1861 reasonIfUnsupported,
1862 "Reference Lstm: input and CellToInputWeights types are mismatched");
1863 }
1864 }
1865 if (descriptor.m_PeepholeEnabled)
1866 {
1867 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1868 "Reference Lstm: input and CellToForgetWeights types are mismatched");
1869 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1870 "Reference Lstm: input and CellToOutputWeights types are mismatched");
1871 }
1872 if (descriptor.m_ProjectionEnabled)
1873 {
1874 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1875 "Reference Lstm: input and mProjectionWeights types are mismatched");
1876 if (paramsInfo.m_ProjectionBias != nullptr)
1877 {
1878 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1879 "Reference Lstm: input and ProjectionBias types are mismatched");
1880 }
1881 }
1882 if (descriptor.m_LayerNormEnabled)
1883 {
1884 if (!descriptor.m_CifgEnabled)
1885 {
1886 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1887 reasonIfUnsupported,
1888 "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1889 }
1890 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1891 reasonIfUnsupported,
1892 "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1893 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1894 reasonIfUnsupported,
1895 "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1896 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1897 reasonIfUnsupported,
1898 "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1899 }
1900
1901 return supported;
1902}

References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1904 of file RefLayerSupport.cpp.

1908{
1909 bool supported = true;
1910
1911 std::array<DataType,7> supportedTypes = {
1912 DataType::Float32,
1913 DataType::Float16,
1914 DataType::QAsymmS8,
1915 DataType::QAsymmU8,
1916 DataType::QSymmS16,
1917 DataType::Signed32
1918 };
1919
1920 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1921 "Reference maximum: input 0 is not a supported type.");
1922
1923 supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1924 "Reference maximum: input 1 is not a supported type.");
1925
1926 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1927 "Reference maximum: output is not a supported type.");
1928
1929 supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1930 "Reference maximum: input 0 and Input 1 types are mismatched");
1931
1932 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1933 "Reference maximum: input and output types are mismatched");
1934
1935 supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1936 "Reference maximum: shapes are not suitable for implicit broadcast.");
1937
1938 return supported;
1939}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo & input,
const TensorInfo & output,
const MeanDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1941 of file RefLayerSupport.cpp.

1945{
1946 bool supported = true;
1947 std::string meanLayerStr = "Mean";
1948 std::string outputTensorStr = "output";
1949
1950 std::array<DataType,6> supportedTypes =
1951 {
1952 DataType::Float32,
1953 DataType::Float16,
1954 DataType::QAsymmS8,
1955 DataType::QAsymmU8,
1956 DataType::QSymmS8,
1957 DataType::QSymmS16
1958 };
1959
1960 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1961 "Reference Mean: input type not supported.");
1962
1963 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1964 "Reference Mean: input and output types are mismatched");
1965
1966 if (descriptor.m_KeepDims)
1967 {
1968 supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1969 reasonIfUnsupported,
1970 CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1971 output.GetNumDimensions(),
1972 meanLayerStr, outputTensorStr).data());
1973 }
1974 else if (descriptor.m_Axis.empty())
1975 {
1976 supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1977 reasonIfUnsupported,
1978 CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1979 meanLayerStr, outputTensorStr).data());
1980 }
1981 else
1982 {
1983 auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1984
1985 if (outputDim > 0)
1986 {
1987 supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1988 reasonIfUnsupported,
1989 CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1990 meanLayerStr, outputTensorStr).data());
1991 }
1992 else
1993 {
1994 supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1995 reasonIfUnsupported,
1996 CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1997 meanLayerStr, outputTensorStr).data());
1998 }
1999 }
2000
2001 return supported;
2002}
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, armnn::numeric_cast(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by IsLayerSupported().

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2004 of file RefLayerSupport.cpp.

2007{
2008 bool supported = true;
2009
2010 std::array<DataType,7> supportedTypes =
2011 {
2012 DataType::BFloat16,
2013 DataType::Float32,
2014 DataType::Float16,
2015 DataType::QAsymmS8,
2016 DataType::QAsymmU8,
2017 DataType::QSymmS16,
2018 DataType::Boolean
2019 };
2020
2021 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2022 "Reference MemCopy: input type not supported");
2023
2024 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2025 "Reference MemCopy: output type not supported");
2026
2027 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2028 "Reference MemCopy: input and output types are mismatched");
2029
2030 return supported;
2031}

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2033 of file RefLayerSupport.cpp.

2037{
2038 bool supported = true;
2039
2040 std::array<DataType,7> supportedTypes = {
2041 DataType::Float32,
2042 DataType::Float16,
2043 DataType::QAsymmS8,
2044 DataType::QAsymmU8,
2045 DataType::QSymmS16,
2046 DataType::Signed32
2047 };
2048
2049 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2050 "Reference minimum: input 0 is not a supported type.");
2051
2052 supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2053 "Reference minimum: input 1 is not a supported type.");
2054
2055 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2056 "Reference minimum: output is not a supported type.");
2057
2058 supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2059 "Reference minimum: input 0 and Input 1 types are mismatched");
2060
2061 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2062 "Reference minimum: input and output types are mismatched");
2063
2064 supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2065 "Reference minimum: shapes are not suitable for implicit broadcast.");
2066
2067 return supported;
2068}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2070 of file RefLayerSupport.cpp.

2074{
2075 bool supported = true;
2076
2077 std::array<DataType,7> supportedTypes = {
2078 DataType::Float32,
2079 DataType::Float16,
2080 DataType::QAsymmS8,
2081 DataType::QAsymmU8,
2082 DataType::QSymmS16,
2083 DataType::Signed32
2084 };
2085
2086 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2087 "Reference multiplication: input 0 is not a supported type.");
2088
2089 supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2090 "Reference multiplication: input 1 is not a supported type.");
2091
2092 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2093 "Reference multiplication: output is not a supported type.");
2094
2095 supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2096 "Reference multiplication: input 0 and Input 1 types are mismatched");
2097
2098 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2099 "Reference multiplication: input and output types are mismatched");
2100
2101 supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2102 "Reference multiplication: shapes are not suitable for implicit broadcast.");
2103
2104 return supported;
2105}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const NormalizationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2107 of file RefLayerSupport.cpp.

2111{
2112 IgnoreUnused(descriptor);
2113
2114 // Define supported types
2115 std::array<DataType, 6> supportedTypes =
2116 {
2117 DataType::Float16,
2118 DataType::Float32,
2119 DataType::QAsymmS8,
2120 DataType::QAsymmU8,
2121 DataType::QSymmS16
2122 };
2123
2124 bool supported = true;
2125
2126 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2127 "Reference normalization: input type not supported.");
2128
2129 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2130 "Reference normalization: output type not supported.");
2131
2132 supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2133 "Reference normalization: input and output shapes have different "
2134 "num total elements.");
2135
2136 return supported;
2137}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2139 of file RefLayerSupport.cpp.

2141{
2142 return true;
2143}

Referenced by IsLayerSupported().

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo & input,
const TensorInfo & output,
const PadDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2145 of file RefLayerSupport.cpp.

2149{
2150 IgnoreUnused(descriptor);
2151 bool supported = true;
2152
2153 // Define supported output and inputs types.
2154 std::array<DataType,6> supportedTypes =
2155 {
2156 DataType::Float32,
2157 DataType::Float16,
2158 DataType::QAsymmS8,
2159 DataType::QAsymmU8,
2160 DataType::QSymmS16
2161 };
2162
2163 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2164 "Reference pad: input is not a supported type.");
2165
2166 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2167 "Reference pad: output is not a supported type.");
2168
2169 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2170 "Reference pad: input and output types are mismatched.");
2171
2172 return supported;
2173}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo & input,
const TensorInfo & output,
const PermuteDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2175 of file RefLayerSupport.cpp.

2179{
2180 IgnoreUnused(descriptor);
2181 bool supported = true;
2182
2183 // Define supported output and inputs types.
2184 std::array<DataType, 6> supportedTypes =
2185 {
2186 DataType::BFloat16,
2187 DataType::Float32,
2188 DataType::Float16,
2189 DataType::QAsymmS8,
2190 DataType::QAsymmU8,
2191 DataType::QSymmS16
2192 };
2193
2194 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2195 "Reference permute: input is not a supported type.");
2196
2197 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2198 "Reference permute: output is not a supported type.");
2199
2200 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2201 "Reference permute: input and output types are mismatched.");
2202
2203 return supported;
2204}

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Pooling2dDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2206 of file RefLayerSupport.cpp.

2210{
2211 IgnoreUnused(descriptor);
2212 bool supported = true;
2213
2214 // Define supported output and inputs types.
2215 std::array<DataType,6> supportedTypes =
2216 {
2217 DataType::Float32,
2218 DataType::Float16,
2219 DataType::QAsymmS8,
2220 DataType::QAsymmU8,
2221 DataType::QSymmS16
2222 };
2223
2224 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2225 "Reference poolind2d: input is not a supported type.");
2226
2227 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2228 "Reference poolind2d: output is not a supported type.");
2229
2230 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2231 "Reference poolind2d: input and output types are mismatched.");
2232
2233 return supported;
2234}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Pooling3dDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2236 of file RefLayerSupport.cpp.

2240{
2241 IgnoreUnused(descriptor);
2242 bool supported = true;
2243
2244 // Define supported output and inputs types.
2245 std::array<DataType,6> supportedTypes =
2246 {
2247 DataType::Float32,
2248 DataType::Float16,
2249 DataType::QAsymmS8,
2250 DataType::QAsymmU8,
2251 DataType::QSymmS16
2252 };
2253
2254 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2255 "Reference poolind3d: input is not a supported type.");
2256
2257 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2258 "Reference poolind3d: output is not a supported type.");
2259
2260 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2261 "Reference poolind3d: input and output types are mismatched.");
2262
2263 return supported;
2264}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo & input,
const TensorInfo & alpha,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2782 of file RefLayerSupport.cpp.

2786{
2787 bool supported = true;
2788
2789 std::array<DataType, 6> supportedTypes
2790 {
2791 DataType::Float32,
2792 DataType::Float16,
2793 DataType::QAsymmS8,
2794 DataType::QAsymmU8,
2795 DataType::QSymmS16
2796 };
2797
2798 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2799 "PReLU: input is not a supported type.");
2800
2801 supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2802 "PReLU: alpha is not a supported type.");
2803
2804 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2805 "PReLU: output is not a supported type.");
2806
2807 supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2808 "PReLU: input, alpha and output types are mismatched");
2809
2810 supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2811 "PReLU: shapes are not suitable for implicit broadcast");
2812
2813 return supported;
2814}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo & input,
const TensorInfo & previousOutputIn,
const TensorInfo & previousCellStateIn,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const QLstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2267 of file RefLayerSupport.cpp.

2276{
2277 IgnoreUnused(input);
2278 IgnoreUnused(previousOutputIn);
2279 IgnoreUnused(previousCellStateIn);
2280 IgnoreUnused(outputStateOut);
2281 IgnoreUnused(cellStateOut);
2282 IgnoreUnused(output);
2283 IgnoreUnused(descriptor);
2284 IgnoreUnused(paramsInfo);
2285
2286 IgnoreUnused(reasonIfUnsupported);
2287
2288 return true;
2289}

References armnn::IgnoreUnused().

Referenced by IsLayerSupported().

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2291 of file RefLayerSupport.cpp.

2294{
2295 bool supported = true;
2296
2297 // Define supported input types.
2298 std::array<DataType,7> supportedInputTypes = {
2299 DataType::Float32,
2300 DataType::Float16,
2301 DataType::QAsymmS8,
2302 DataType::QAsymmU8,
2303 DataType::QSymmS8,
2304 DataType::QSymmS16
2305 };
2306
2307 supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2308 "Reference quantize: input type not supported.");
2309
2310 // Define supported output types.
2311 std::array<DataType,4> supportedOutputTypes = {
2312 DataType::QAsymmS8,
2313 DataType::QAsymmU8,
2314 DataType::QSymmS8,
2315 DataType::QSymmS16
2316 };
2317 supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2318 "Reference quantize: output type not supported.");
2319
2320 supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2321 "Reference quantize: input and output shapes have different num total elements.");
2322
2323 return supported;
2324}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by IsLayerSupported().

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2326 of file RefLayerSupport.cpp.

2329{
2330 IgnoreUnused(input);
2331 // Define supported output types.
2332 std::array<DataType,1> supportedOutputTypes =
2333 {
2334 DataType::Signed32,
2335 };
2336
2337 return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2338 "Reference rank: input type not supported.");
2339}

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo & input,
const TensorInfo & output,
const ReduceDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2341 of file RefLayerSupport.cpp.

2345{
2346 IgnoreUnused(descriptor);
2347 bool supported = true;
2348 std::array<DataType,7> supportedTypes =
2349 {
2350 DataType::Float32,
2351 DataType::Float16,
2352 DataType::QAsymmS8,
2353 DataType::QAsymmU8,
2354 DataType::QSymmS16,
2355 DataType::Signed32
2356 };
2357
2358 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2359 "Reference Reduce: input type not supported");
2360
2361 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2362 "Reference Reduce: output type not supported");
2363
2364 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2365 "Reference Reduce: input and output types not matching");
2366
2367 return supported;
2368}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo & input,
const TensorInfo & output,
const ReshapeDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2370 of file RefLayerSupport.cpp.

2374{
2375 IgnoreUnused(output);
2376 IgnoreUnused(descriptor);
2377 // Define supported output types.
2378 std::array<DataType,8> supportedOutputTypes =
2379 {
2380 DataType::BFloat16,
2381 DataType::Float32,
2382 DataType::Float16,
2383 DataType::Signed32,
2384 DataType::QAsymmS8,
2385 DataType::QAsymmU8,
2386 DataType::QSymmS16,
2387 DataType::Boolean
2388 };
2389
2390 return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2391 "Reference reshape: input type not supported.");
2392}

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo & input,
const TensorInfo & output,
const ResizeDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2394 of file RefLayerSupport.cpp.

2398{
2399 IgnoreUnused(descriptor);
2400 bool supported = true;
2401 std::array<DataType,7> supportedTypes =
2402 {
2403 DataType::BFloat16,
2404 DataType::Float32,
2405 DataType::Float16,
2406 DataType::QAsymmS8,
2407 DataType::QAsymmU8,
2408 DataType::QSymmS8,
2409 DataType::QSymmS16
2410 };
2411
2412 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2413 "Reference Resize: input type not supported");
2414
2415 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2416 "Reference Resize: output type not supported");
2417
2418 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2419 "Reference Resize: input and output types not matching");
2420
2421 return supported;
2422}

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by IsLayerSupported().

◆ IsReverseV2Supported()

bool IsReverseV2Supported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2424 of file RefLayerSupport.cpp.

2428{
2429 bool supported = true;
2430 // ReverseV2 is data type agnostic so it can support all the types in the Reference backend
2431 std::array<DataType,8> supportedTypes =
2432 {
2433 DataType::BFloat16,
2434 DataType::Float32,
2435 DataType::Float16,
2436 DataType::QAsymmS8,
2437 DataType::QAsymmU8,
2438 DataType::QSymmS8,
2439 DataType::QSymmS16,
2440 DataType::Signed32
2441 };
2442
2443 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2444 "Reference ReverseV2: input0 type not supported");
2445
2446 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2447 "Reference ReverseV2: output type not supported");
2448
2449 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2450 "Reference ReverseV2: input0 and output types not matching");
2451
2452 std::array<DataType,6> input2SupportedTypes =
2453 {
2454 DataType::Signed32
2455 };
2456
2457 supported &= CheckSupportRule(TypeAnyOf(input1, input2SupportedTypes), reasonIfUnsupported,
2458 "Reference ReverseV2: input1 type not supported");
2459
2460 return supported;
2461}

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsScatterNdSupported()

bool IsScatterNdSupported ( const TensorInfo & input,
const TensorInfo & indices,
const TensorInfo & updates,
const TensorInfo & output,
const ScatterNdDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2463 of file RefLayerSupport.cpp.

2469{
2470 IgnoreUnused(descriptor);
2471
2472 bool supported = true;
2473
2474 std::array<DataType, 7> supportedTypes
2475 {
2476 DataType::Float32,
2477 DataType::Float16,
2478 DataType::QAsymmS8,
2479 DataType::QAsymmU8,
2480 DataType::QSymmS8,
2481 DataType::QSymmS16,
2482 DataType::Signed32
2483 };
2484
2485 std::array<DataType, 1> indicesSupportedTypes =
2486 {
2487 DataType::Signed32
2488 };
2489
2490 supported &= CheckSupportRule(TypeAnyOf(indices, indicesSupportedTypes), reasonIfUnsupported,
2491 "ScatterNd: indices type not supported.");
2492
2493 supported &= CheckSupportRule(TypeAnyOf(updates, supportedTypes), reasonIfUnsupported,
2494 "ScatterNd: updates type not supported.");
2495
2496 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2497 "ScatterNd: output type not supported");
2498
2499 supported &= CheckSupportRule(TypesAreEqual(updates, output), reasonIfUnsupported,
2500 "ScatterNd: input and updates types are mismatched");
2501
2502 if (descriptor.m_InputEnabled)
2503 {
2504 // If the input slot is enabled, we have the input tensor in this slot
2505 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2506 "ScatterNd: input type not supported.");
2507
2508 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2509 "ScatterNd: input and output types are mismatched");
2510 }
2511 else
2512 {
2513 // If the input slot is not enabled, we have the shape tensor in this slot
2514 supported &= CheckSupportRule(TypeAnyOf(input, indicesSupportedTypes), reasonIfUnsupported,
2515 "ScatterNd: shape type not supported.");
2516 }
2517
2518 return supported;
2519}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ScatterNdDescriptor::m_InputEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsShapeSupported()

bool IsShapeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2521 of file RefLayerSupport.cpp.

2524{
2525 IgnoreUnused(input);
2526 bool supported = true;
2527
2528 std::array<DataType, 1> supportedTypes =
2529 {
2530 DataType::Signed32
2531 };
2532
2533 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2534 "Reference Shape: output type not supported");
2535
2536 return supported;
2537}

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo & input,
const TensorInfo & output,
const SliceDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2539 of file RefLayerSupport.cpp.

2543{
2544 IgnoreUnused(descriptor);
2545 bool supported = true;
2546
2547 std::array<DataType, 5> supportedTypes =
2548 {
2549 DataType::Float32,
2550 DataType::QAsymmS8,
2551 DataType::QAsymmU8,
2552 DataType::QSymmS16,
2553 DataType::Signed32
2554 };
2555
2556 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2557 "Reference Slice: input type not supported");
2558
2559 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2560 "Reference Slice: output type not supported");
2561
2562 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2563 "Reference Slice: input and output types are mismatched");
2564
2565 return supported;
2566}

References armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const SoftmaxDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2568 of file RefLayerSupport.cpp.

2572{
2573 IgnoreUnused(descriptor);
2574 bool supported = true;
2575 std::array<DataType,7> supportedTypes =
2576 {
2577 DataType::Float32,
2578 DataType::Float16,
2579 DataType::QSymmS8,
2580 DataType::QAsymmS8,
2581 DataType::QAsymmU8,
2582 DataType::QSymmS16
2583 };
2584
2585 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2586 "Reference Softmax: output type not supported");
2587
2588 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2589 "Reference Softmax: input type not supported");
2590
2591 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2592 "Reference Softmax: input type not supported");
2593
2594 return supported;
2595}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by IsLayerSupported().

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const SpaceToBatchNdDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2597 of file RefLayerSupport.cpp.

2601{
2602 IgnoreUnused(descriptor);
2603 bool supported = true;
2604 std::array<DataType,6> supportedTypes =
2605 {
2606 DataType::Float32,
2607 DataType::Float16,
2608 DataType::QAsymmS8,
2609 DataType::QAsymmU8,
2610 DataType::QSymmS16
2611 };
2612
2613 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2614 "Reference SpaceToBatchNd: input type not supported");
2615
2616 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2617 "Reference SpaceToBatchNd: output type not supported");
2618
2619 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2620 "Reference SpaceToBatchNd: input and output types are mismatched");
2621
2622 return supported;
2623}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo & input,
const TensorInfo & output,
const SpaceToDepthDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2625 of file RefLayerSupport.cpp.

2629{
2630
2631 IgnoreUnused(descriptor);
2632 bool supported = true;
2633
2634 std::array<DataType,6> supportedTypes =
2635 {
2636 DataType::Float32,
2637 DataType::Float16,
2638 DataType::QAsymmS8,
2639 DataType::QAsymmU8,
2640 DataType::QSymmS16
2641 };
2642
2643 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2644 "Reference SpaceToDepth: input type not supported");
2645
2646 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2647 "Reference SpaceToDepth: output type not supported");
2648
2649 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2650 "Reference SpaceToDepth: input and output types are mismatched");
2651
2652 return supported;
2653}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo & input,
const std::vector< std::reference_wrapper< TensorInfo > > & outputs,
const ViewsDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2655 of file RefLayerSupport.cpp.

2659{
2660 IgnoreUnused(descriptor);
2661 bool supported = true;
2662 std::array<DataType,6> supportedTypes =
2663 {
2664 DataType::Float32,
2665 DataType::Float16,
2666 DataType::QAsymmS8,
2667 DataType::QAsymmU8,
2668 DataType::QSymmS16
2669 };
2670
2671 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2672 "Reference splitter: output type not supported");
2673 for (const TensorInfo& output : outputs)
2674 {
2675 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2676 "Reference splitter: input type not supported");
2677
2678 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2679 "Reference splitter: input and output types mismatched.");
2680 }
2681
2682 return supported;
2683}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo * > & inputs,
const TensorInfo & output,
const StackDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2685 of file RefLayerSupport.cpp.

2689{
2690 IgnoreUnused(descriptor);
2691
2692 bool supported = true;
2693 std::array<DataType,7> supportedTypes =
2694 {
2695 DataType::Float32,
2696 DataType::Float16,
2697 DataType::QAsymmS8,
2698 DataType::QAsymmU8,
2699 DataType::QSymmS16,
2700 DataType::Signed32
2701 };
2702
2703 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2704 "Reference stack: output type not supported");
2705 for (const TensorInfo* input : inputs)
2706 {
2707 supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2708 "Reference stack: input type not supported");
2709
2710 supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2711 "Reference stack: input and output types mismatched.");
2712 }
2713
2714 return supported;
2715}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo & input,
const TensorInfo & output,
const StridedSliceDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2717 of file RefLayerSupport.cpp.

2721{
2722 IgnoreUnused(descriptor);
2723 bool supported = true;
2724
2725 std::array<DataType,5> supportedTypes =
2726 {
2727 DataType::Float32,
2728 DataType::QAsymmS8,
2729 DataType::QAsymmU8,
2730 DataType::QSymmS16
2731 };
2732
2733 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2734 "Reference StridedSlice: input type not supported");
2735
2736 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2737 "Reference StridedSlice: output type not supported");
2738
2739 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2740 "Reference StridedSlice: input and output types are mismatched");
2741
2742 return supported;
2743}

References armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2745 of file RefLayerSupport.cpp.

2749{
2750 bool supported = true;
2751
2752 std::array<DataType,7> supportedTypes = {
2753 DataType::Float32,
2754 DataType::Float16,
2755 DataType::QAsymmS8,
2756 DataType::QAsymmU8,
2757 DataType::QSymmS16,
2758 DataType::Signed32
2759 };
2760
2761 supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2762 "Reference subtraction: input 0 is not a supported type.");
2763
2764 supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2765 "Reference subtraction: input 1 is not a supported type.");
2766
2767 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2768 "Reference subtraction: output is not a supported type.");
2769
2770 supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2771 "Reference subtraction: input 0 and Input 1 types are mismatched");
2772
2773 supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2774 "Reference subtraction: input and output types are mismatched");
2775
2776 supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2777 "Reference subtraction: shapes are not suitable for implicit broadcast.");
2778
2779 return supported;
2780}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by IsLayerSupported().

◆ IsTileSupported()

bool IsTileSupported ( const TensorInfo & input,
const TensorInfo & output,
const TileDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2816 of file RefLayerSupport.cpp.

2820{
2821 IgnoreUnused(descriptor);
2822
2823 bool supported = true;
2824
2825 std::array<DataType, 8> supportedTypes
2826 {
2827 DataType::Float32,
2828 DataType::Float16,
2829 DataType::QAsymmS8,
2830 DataType::QAsymmU8,
2831 DataType::QSymmS8,
2832 DataType::QSymmS16,
2833 DataType::Signed32,
2834 DataType::Signed64
2835 };
2836
2837 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2838 "Tile: input type not supported.");
2839
2840 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2841 "Tile: output type not supported");
2842
2843 return supported;
2844}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and armnn::Signed64.

Referenced by IsLayerSupported().

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2846 of file RefLayerSupport.cpp.

2852{
2853 IgnoreUnused(descriptor);
2854 bool supported = true;
2855
2856 std::array<DataType,7> supportedTypes =
2857 {
2858 DataType::Float32,
2859 DataType::Float16,
2860 DataType::QAsymmS8,
2861 DataType::QAsymmU8,
2862 DataType::QSymmS8,
2863 DataType::QSymmS16
2864 };
2865
2866 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2867 "Reference TransposeConvolution2d: input is not a supported type.");
2868
2869 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2870 "Reference TransposeConvolution2d: output is not a supported type.");
2871
2872 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2873 "Reference TransposeConvolution2d: input and output types mismatched.");
2874
2875
2876 const DataType inputType = input.GetDataType();
2877 if (IsQuantized8BitType(inputType))
2878 {
2879 std::array<DataType, 3> supportedWeightTypes =
2880 {
2881 DataType::QAsymmS8,
2882 DataType::QAsymmU8,
2883 DataType::QSymmS8
2884 };
2885
2886 supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2887 "Reference TransposeConvolution2d: weights type not supported for "
2888 "quantized input.");
2889 }
2890 else
2891 {
2892 supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2893 "Reference TransposeConvolution2d: weights is not a supported type.");
2894
2895 supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2896 "Reference TransposeConvolution2d: input and weights types mismatched.");
2897 }
2898
2899 if (biases.has_value())
2900 {
2901 std::array<DataType,4> biasesSupportedTypes =
2902 {
2903 DataType::Float32,
2904 DataType::Float16,
2905 DataType::Signed32
2906 };
2907 supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2908 "Reference TransposeConvolution2d: biases is not a supported type.");
2909 }
2910
2911 return supported;
2912}

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< IsReference, T >::value().

Referenced by IsLayerSupported().

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2914 of file RefLayerSupport.cpp.

2918{
2919 IgnoreUnused(descriptor);
2920 bool supported = true;
2921
2922 // Define supported output and inputs types.
2923 std::array<DataType, 6> supportedTypes =
2924 {
2925 DataType::BFloat16,
2926 DataType::Float32,
2927 DataType::Float16,
2928 DataType::QAsymmS8,
2929 DataType::QAsymmU8,
2930 DataType::QSymmS16
2931 };
2932
2933 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2934 "Reference transpose: input is not a supported type.");
2935
2936 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2937 "Reference transpose: output is not a supported type.");
2938
2939 supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2940 "Reference transpose: input and output types are mismatched.");
2941
2942 return supported;
2943}

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by IsLayerSupported().

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo & input,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateIn,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const UnidirectionalSequenceLstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 2945 of file RefLayerSupport.cpp.

2955{
2956 IgnoreUnused(descriptor);
2957 IgnoreUnused(paramsInfo);
2958 IgnoreUnused(outputStateIn);
2959 IgnoreUnused(cellStateIn);
2960 IgnoreUnused(outputStateOut);
2961 IgnoreUnused(cellStateOut);
2962 bool supported = true;
2963
2964 std::array<DataType, 2> supportedTypes =
2965 {
2966 DataType::Float32,
2967 DataType::QAsymmS8
2968 };
2969
2970 std::array<DataType, 2> supportedWeightTypes =
2971 {
2972 DataType::Float32,
2973 DataType::QAsymmS8
2974 };
2975
2976 std::array<DataType, 3> supportedBiasTypes =
2977 {
2978 DataType::Float32,
2979 DataType::QAsymmS8,
2980 DataType::Signed32
2981 };
2982
2983 // check inputs and outputs
2984 supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2985 "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2986 supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2987 "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2988
2989 // check layer parameters
2990 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2991 reasonIfUnsupported,
2992 "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2993 "is not a supported type.");
2994 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2995 reasonIfUnsupported,
2996 "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2997 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2998 reasonIfUnsupported,
2999 "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
3000 "is not a supported type.");
3001 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
3002 reasonIfUnsupported,
3003 "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
3004 "is not a supported type.");
3005 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
3006 reasonIfUnsupported,
3007 "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
3008 "is not a supported type.");
3009 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
3010 reasonIfUnsupported,
3011 "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
3012 "is not a supported type.");
3013
3014 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
3015 "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
3016 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
3017 "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
3018 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
3019 "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
3020 if (!descriptor.m_CifgEnabled)
3021 {
3022 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
3023 reasonIfUnsupported,
3024 "Reference UnidirectionalSequenceLstm: InputToInputWeights "
3025 "is not a supported type.");
3026 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
3027 reasonIfUnsupported,
3028 "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
3029 "is not a supported type.");
3030 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
3031 "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
3032 if (descriptor.m_PeepholeEnabled)
3033 {
3034 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
3035 reasonIfUnsupported,
3036 "Reference UnidirectionalSequenceLstm: CellToInputWeights "
3037 "is not a supported type.");
3038 }
3039 }
3040 if (descriptor.m_PeepholeEnabled)
3041 {
3042 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
3043 reasonIfUnsupported,
3044 "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
3045 "is not a supported type.");
3046 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
3047 reasonIfUnsupported,
3048 "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
3049 "is not a supported type.");
3050 }
3051 if (descriptor.m_ProjectionEnabled)
3052 {
3053 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
3054 reasonIfUnsupported,
3055 "Reference UnidirectionalSequenceLstm: ProjectionWeights "
3056 "is not a supported type.");
3057 if (paramsInfo.m_ProjectionBias != nullptr)
3058 {
3059 supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
3060 "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
3061 "are mismatched");
3062 }
3063 }
3064 if (descriptor.m_LayerNormEnabled)
3065 {
3066 if (!descriptor.m_CifgEnabled)
3067 {
3068 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
3069 reasonIfUnsupported,
3070 "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
3071 "is not a supported type.");
3072 }
3073 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
3074 reasonIfUnsupported,
3075 "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
3076 "is not a supported type.");
3077 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
3078 reasonIfUnsupported,
3079 "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
3080 "is not a supported type.");
3081 supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
3082 reasonIfUnsupported,
3083 "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
3084 "is not a supported type.");
3085 }
3086
3087 return supported;
3088}

References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, armnn::QAsymmS8, and armnn::Signed32.

Referenced by IsLayerSupported().


The documentation for this class was generated from the following files: