ArmNN
 24.05
RefLayerSupport Class Reference

#include <RefLayerSupport.hpp>

Inheritance diagram for RefLayerSupport:
[legend]
Collaboration diagram for RefLayerSupport:
[legend]

Public Member Functions

bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
 Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported. More...
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBroadcastToSupported (const TensorInfo &input, const TensorInfo &output, const BroadcastToDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsReverseV2Supported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsScatterNdSupported (const TensorInfo &input, const TensorInfo &indices, const TensorInfo &updates, const TensorInfo &output, const ScatterNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsTileSupported (const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
- Public Member Functions inherited from LayerSupportBase
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file RefLayerSupport.hpp.

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo & input,
const TensorInfo & output,
const ActivationDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 560 of file RefLayerSupport.cpp.

564 {
565  bool supported = true;
566 
567  // Define supported types.
568  std::array<DataType,6> supportedTypes = {
574  };
575 
576  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
577  "Reference activation: input type not supported.");
578 
579  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
580  "Reference activation: output type not supported.");
581 
582  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
583  "Reference activation: input and output types mismatched.");
584 
585  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
586  "Reference activation: input and output shapes are of different rank.");
587 
588 
589  struct ActivationFunctionSupported : public Rule
590  {
591  ActivationFunctionSupported(const ActivationDescriptor& desc)
592  {
593  switch(desc.m_Function)
594  {
608  {
609  m_Res = true;
610  break;
611  }
612  default:
613  {
614  m_Res = false;
615  break;
616  }
617  }
618  }
619  };
620 
621  // Function is supported
622  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
623  "Reference activation: function not supported.");
624 
625  return supported;
626 }

References armnn::Abs, armnn::BoundedReLu, armnn::CheckSupportRule(), armnn::Elu, armnn::Float16, armnn::Float32, armnn::Gelu, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 628 of file RefLayerSupport.cpp.

632 {
633  bool supported = true;
634 
635  std::array<DataType,7> supportedTypes = {
642  };
643 
644  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
645  "Reference addition: input 0 is not a supported type.");
646 
647  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
648  "Reference addition: input 1 is not a supported type.");
649 
650  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
651  "Reference addition: output is not a supported type.");
652 
653  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
654  "Reference addition: input 0 and Input 1 types are mismatched");
655 
656  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
657  "Reference addition: input and output types are mismatched");
658 
659  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
660  "Reference addition: shapes are not suitable for implicit broadcast.");
661 
662  return supported;
663 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const ArgMinMaxDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 665 of file RefLayerSupport.cpp.

668 {
669  IgnoreUnused(descriptor);
670 
671  std::array<DataType, 8> supportedInputTypes =
672  {
680  };
681 
682  std::array<DataType,2> supportedOutputTypes = {
685  };
686 
687  bool supported = true;
688 
689  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
690  "Reference ArgMinMax: input is not a supported type.");
691  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
692  "Reference ArgMinMax: output type not supported");
693 
694  return supported;
695 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and armnn::Signed64.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo & inputX,
const TensorInfo & inputY,
const TensorInfo & output,
const BatchMatMulDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 697 of file RefLayerSupport.cpp.

702 {
703  IgnoreUnused(descriptor);
704 
705  std::array<DataType, 6> supportedTypes =
706  {
712  };
713 
714  bool supported = true;
715 
716  supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
717  "Reference batch matrix multiplication: input X is not a supported type");
718 
719  supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
720  "Reference batch matrix multiplication: input Y is not a supported type");
721 
722  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
723  "Reference batch matrix multiplication: output is not a supported type");
724 
725  supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
726  "Reference batch matrix multiplication: input X and input Y types are mismatched");
727 
728  supported &= CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
729  "Reference batch matrix multiplication: inputs and output types are mismatched");
730 
731  supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputX, 2),
732  reasonIfUnsupported,
733  "Reference batch matrix multiplication: input X is not of rank 2 or greater");
734 
735  supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputY, 2),
736  reasonIfUnsupported,
737  "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
738 
739  return supported;
740 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & mean,
const TensorInfo & var,
const TensorInfo & beta,
const TensorInfo & gamma,
const BatchNormalizationDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 742 of file RefLayerSupport.cpp.

750 {
751  IgnoreUnused(descriptor);
752 
753  std::array<DataType, 6> supportedTypes =
754  {
760  };
761 
762  bool supported = true;
763 
764  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
765  "Reference batch normalization: input is not a supported type.");
766 
767  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
768  "Reference batch normalization: output is not a supported type.");
769 
770  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
771  "Reference batch normalization: input and output types are mismatched");
772 
773  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
774  "Reference batch normalization: mean is not a supported type.");
775 
776  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
777  "Reference batch normalization: variance is not a supported type.");
778 
779  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
780  "Reference batch normalization: beta is not a supported type.");
781 
782  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
783  "Reference batch normalization: gamma is not a supported type.");
784 
785  return supported;
786 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const BatchToSpaceNdDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 788 of file RefLayerSupport.cpp.

792 {
793  IgnoreUnused(descriptor);
794 
795  bool supported = true;
796 
797  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
798  std::string inputTensorStr = "input";
799  std::string outputTensorStr = "output";
800 
801  // Define supported types.
802  std::array<DataType,6> supportedTypes =
803  {
809  };
810 
811  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
812  "Reference BatchToSpaceNd: input type not supported.");
813 
814  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
815  "Reference BatchToSpaceNd: output type not supported.");
816 
817  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
818  "Reference BatchToSpaceNd: input and output types mismatched.");
819 
820  return supported;
821 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBroadcastToSupported()

bool IsBroadcastToSupported ( const TensorInfo & input,
const TensorInfo & output,
const BroadcastToDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 823 of file RefLayerSupport.cpp.

827 {
828  IgnoreUnused(descriptor);
829 
830  bool supported = true;
831 
832  std::array<DataType, 8> supportedTypes
833  {
842  };
843 
844  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
845  "BroadcastTo: input type not supported.");
846 
847  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
848  "BroadcastTo: output type not supported");
849 
850  return supported;
851 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and armnn::Signed64.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 853 of file RefLayerSupport.cpp.

856 {
857  std::array<DataType, 10> supportedInputTypes =
858  {
867  };
868 
869  bool supported = true;
870  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
871  "Reference cast: input is not a supported type");
872 
873 
874  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
875  "Reference cast: output is not a supported type");
876 
877  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
878  "Reference cast: input and output shapes have different number of total elements");
879 
880  return supported;
881 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and armnn::Signed64.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo & input,
const TensorInfo & output,
const ChannelShuffleDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 883 of file RefLayerSupport.cpp.

887 {
888  IgnoreUnused(descriptor);
889  bool supported = true;
890 
891  // Define supported output and inputs types.
892  std::array<DataType, 7> supportedTypes =
893  {
900  };
901 
902  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
903  "Reference ChannelShuffle: input is not a supported type.");
904 
905  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
906  "Reference ChannelShuffle: output is not a supported type.");
907 
908  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
909  "Reference ChannelShuffle: input and output types are mismatched.");
910 
911  return supported;
912 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const ComparisonDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 915 of file RefLayerSupport.cpp.

920 {
921  IgnoreUnused(descriptor);
922  std::array<DataType, 8> supportedInputTypes =
923  {
931  };
932 
933  bool supported = true;
934  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
935  "Reference comparison: input 0 is not a supported type");
936 
937  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
938  "Reference comparison: input 0 and Input 1 types are mismatched");
939 
940  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
941  "Reference comparison: output is not of type Boolean");
942 
943  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
944  "Reference comparison: shapes are not suitable for implicit broadcast.");
945 
946  return supported;
947 }

References armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo * >  inputs,
const TensorInfo & output,
const OriginsDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 949 of file RefLayerSupport.cpp.

953 {
954  IgnoreUnused(descriptor);
955 
956  bool supported = true;
957  std::array<DataType,7> supportedTypes =
958  {
965  };
966 
967  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
968  "Reference concatenation: output type not supported");
969  for (const TensorInfo* input : inputs)
970  {
971  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
972  "Reference concatenation: input type not supported");
973 
974  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
975  "Reference concatenation: input and output types mismatched.");
976  }
977 
978  return supported;
979 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 981 of file RefLayerSupport.cpp.

983 {
984  std::array<DataType,8> supportedTypes =
985  {
993  };
994 
995  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
996  "Reference constant: output is not a supported type.");
997 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 999 of file RefLayerSupport.cpp.

1002 {
1003  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1004  input.GetDataType(),
1005  &TrueFunc<>,
1006  &FalseInputFuncF32<>,
1007  &FalseFuncU8<>,
1008  &FalseFuncI32<>,
1009  &FalseFuncU8<>) &&
1010  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1011  output.GetDataType(),
1012  &FalseOutputFuncF16<>,
1013  &TrueFunc<>,
1014  &FalseFuncU8<>,
1015  &FalseFuncI32<>,
1016  &FalseFuncU8<>));
1017 }

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1019 of file RefLayerSupport.cpp.

1022 {
1023  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1024  input.GetDataType(),
1025  &FalseInputFuncF16<>,
1026  &TrueFunc<>,
1027  &FalseFuncU8<>,
1028  &FalseFuncI32<>,
1029  &FalseFuncU8<>) &&
1030  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1031  output.GetDataType(),
1032  &TrueFunc<>,
1033  &FalseOutputFuncF32<>,
1034  &FalseFuncU8<>,
1035  &FalseFuncI32<>,
1036  &FalseFuncU8<>));
1037 }

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Convolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1039 of file RefLayerSupport.cpp.

1045 {
1046  bool supported = true;
1047 
1048  // Define supported types.
1049  std::array<DataType,7> supportedTypes =
1050  {
1057  };
1058 
1059  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1060  "Reference Convolution2d: input is not a supported type.");
1061 
1062  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1063  "Reference Convolution2d: output is not a supported type.");
1064 
1065  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1066  "Reference Convolution2d: input and output types mismatched.");
1067 
1068 
1069  const DataType inputType = input.GetDataType();
1070  if (IsQuantized8BitType(inputType))
1071  {
1072  std::array<DataType, 3> supportedWeightTypes =
1073  {
1077  };
1078 
1079  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1080  "Reference Convolution2d: weights type not supported for quantized input.");
1081  }
1082  else
1083  {
1084  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1085  "Reference Convolution2d: weights is not a supported type.");
1086 
1087  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1088  "Reference Convolution2d: input and weights types mismatched.");
1089  }
1090 
1091  if (biases.has_value())
1092  {
1093  std::array<DataType,4> biasesSupportedTypes =
1094  {
1098  };
1099 
1100  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1101  "Reference Convolution2d: biases is not a supported type.");
1102  }
1103  IgnoreUnused(descriptor);
1104 
1105  return supported;
1106 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Convolution3dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1108 of file RefLayerSupport.cpp.

1114 {
1115  bool supported = true;
1116 
1117  // Define supported types.
1118  std::array<DataType,7> supportedTypes =
1119  {
1126  };
1127 
1128  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1129  "Reference Convolution3d: input is not a supported type.");
1130 
1131  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1132  "Reference Convolution3d: output is not a supported type.");
1133 
1134  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1135  "Reference Convolution3d: input and output types mismatched.");
1136 
1137  const DataType inputType = input.GetDataType();
1138  if (IsQuantized8BitType(inputType))
1139  {
1140  std::array<DataType, 3> supportedWeightTypes =
1141  {
1145  };
1146 
1147  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1148  "Reference Convolution3d: weights type not supported for quantized input.");
1149  }
1150  else
1151  {
1152  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1153  "Reference Convolution3d: weights is not a supported type.");
1154 
1155  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1156  "Reference Convolution3d: input and weights types mismatched.");
1157  }
1158 
1159  if (biases.has_value())
1160  {
1161  std::array<DataType,4> biasesSupportedTypes =
1162  {
1166  };
1167 
1168  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1169  "Reference Convolution3d: biases is not a supported type.");
1170  }
1171  IgnoreUnused(descriptor);
1172 
1173  return supported;
1174 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1176 of file RefLayerSupport.cpp.

1179 {
1180  bool supported = true;
1181 
1182  std::array<DataType, 8> supportedTypes =
1183  {
1192  };
1193 
1194  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1195  "Reference for Debug layer: input type not supported");
1196 
1197  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1198  "Reference for Debug layer: output type not supported");
1199 
1200  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1201  "Reference for Debug layer: input and output types are mismatched");
1202 
1203  return supported;
1204 }

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthToSpaceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1206 of file RefLayerSupport.cpp.

1210 {
1211  IgnoreUnused(descriptor);
1212  bool supported = true;
1213 
1214  std::array<DataType,6> supportedTypes =
1215  {
1221  };
1222 
1223  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1224  "Reference DepthToSpace: input type not supported");
1225 
1226  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1227  "Reference DepthToSpace: output type not supported");
1228 
1229  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1230  "Reference DepthToSpace: input and output types are mismatched");
1231 
1232  return supported;
1233 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthwiseConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1235 of file RefLayerSupport.cpp.

1241 {
1242  IgnoreUnused(descriptor);
1243  bool supported = true;
1244 
1245  // Define supported types.
1246  std::array<DataType,7> supportedTypes =
1247  {
1254  };
1255 
1256  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1257  "Reference DepthwiseConvolution2d: input is not a supported type.");
1258 
1259  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1260  "Reference DepthwiseConvolution2d: output is not a supported type.");
1261 
1262  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1263  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1264 
1265  const DataType inputType = input.GetDataType();
1266  if (IsQuantized8BitType(inputType))
1267  {
1268  std::array<DataType, 3> supportedWeightTypes =
1269  {
1273  };
1274 
1275  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1276  "Reference DepthwiseConvolution2d: weights type not supported for "
1277  "quantized input.");
1278  }
1279  else
1280  {
1281  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1282  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1283 
1284  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1285  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1286  }
1287 
1288  if (biases.has_value())
1289  {
1290  std::array<DataType,4> biasesSupportedTypes =
1291  {
1295  };
1296  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1297  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1298  }
1299 
1300  return supported;
1301 
1302 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(), and RefLayerSupport::IsLayerSupported().

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1304 of file RefLayerSupport.cpp.

1307 {
1308  bool supported = true;
1309 
1310  std::array<DataType,5> supportedInputTypes = {
1316  };
1317 
1318  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1319  "Reference for Dequantize layer: input type not supported.");
1320 
1321  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1322  "Reference for Dequantize layer: per-axis quantized input not supported.");
1323 
1324  std::array<DataType,3> supportedOutputTypes = {
1327  };
1328 
1329  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1330  "Reference for Dequantize layer: output type not supported.");
1331 
1332  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1333  "Reference for Dequantize layer: input/output shapes have different num total "
1334  "elements.");
1335 
1336  return supported;
1337 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo &  boxEncodings,
const TensorInfo &  scores,
const TensorInfo &  anchors,
const TensorInfo &  detectionBoxes,
const TensorInfo &  detectionClasses,
const TensorInfo &  detectionScores,
const TensorInfo &  numDetections,
const DetectionPostProcessDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1339 of file RefLayerSupport.cpp.

1348 {
1349  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
1350 
1351  bool supported = true;
1352 
1353  std::array<DataType,6> supportedInputTypes =
1354  {
1360  };
1361 
1362  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1363  "Reference DetectionPostProcess: input 0 is not a supported type.");
1364 
1365  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1366  "Reference DetectionPostProcess: input 1 is not a supported type.");
1367 
1368  return supported;
1369 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthwiseConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1371 of file RefLayerSupport.cpp.

1377 {
1378  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
1379 }

References RefLayerSupport::IsDepthwiseConvolutionSupported().

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1381 of file RefLayerSupport.cpp.

1385 {
1386  bool supported = true;
1387 
1388  std::array<DataType,7> supportedTypes = {
1395  };
1396 
1397  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1398  "Reference division: input 0 is not a supported type.");
1399 
1400  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1401  "Reference division: input 1 is not a supported type.");
1402 
1403  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1404  "Reference division: output is not a supported type.");
1405 
1406  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1407  "Reference division: input 0 and Input 1 types are mismatched");
1408 
1409  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1410  "Reference division: input and output types are mismatched");
1411 
1412  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1413  "Reference division: shapes are not suitable for implicit broadcast.");
1414 
1415  return supported;
1416 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ElementwiseUnaryDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1418 of file RefLayerSupport.cpp.

1422 {
1423  IgnoreUnused(descriptor);
1424 
1425  std::array<DataType, 7> supportedTypes =
1426  {
1433  };
1434 
1435  std::array<DataType, 1> logicalSupportedTypes =
1436  {
1438  };
1439 
1440  bool supported = true;
1441 
1442  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1443  {
1444  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1445  "Reference elementwise unary: input type not supported");
1446 
1447  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1448  "Reference elementwise unary: output type not supported");
1449  }
1450  else
1451  {
1452  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1453  "Reference elementwise unary: input type not supported");
1454 
1455  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1456  "Reference elementwise unary: output type not supported");
1457  }
1458 
1459  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1460  "Reference elementwise unary: input and output types not matching");
1461 
1462  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1463  "Reference elementwise unary: input and output shapes"
1464  "have different number of total elements");
1465 
1466  return supported;
1467 }

References armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo &  input,
const FakeQuantizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1469 of file RefLayerSupport.cpp.

1472 {
1473  IgnoreUnused(descriptor);
1474  bool supported = true;
1475 
1476  std::array<DataType,1> supportedTypes =
1477  {
1479  };
1480 
1481  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1482  "Reference fake quantization: input type not supported.");
1483 
1484  return supported;
1485 }

References armnn::CheckSupportRule(), armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const FillDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1487 of file RefLayerSupport.cpp.

1491 {
1492  IgnoreUnused(descriptor);
1493  IgnoreUnused(output);
1494 
1495  bool supported = true;
1496 
1497  std::array<DataType,3> supportedTypes =
1498  {
1502  };
1503 
1504  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1505  "Reference Fill: input type not supported.");
1506 
1507  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1508  "Reference Fill: output type not supported.");
1509  return supported;
1510 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1512 of file RefLayerSupport.cpp.

1515 {
1516  IgnoreUnused(output);
1517  bool supported = true;
1518 
1519  std::array<DataType,3> supportedTypes =
1520  {
1523  };
1524 
1525  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1526  "Reference Floor: input type not supported.");
1527 
1528  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1529  "Reference Floor: output type not supported.");
1530 
1531  return supported;
1532 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TensorInfo &  weights,
const TensorInfo &  biases,
const FullyConnectedDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1534 of file RefLayerSupport.cpp.

1540 {
1541  bool supported = true;
1542 
1543  // Define supported types.
1544  std::array<DataType,6> supportedTypes =
1545  {
1551  };
1552 
1553  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1554  "Reference Fully Connected: input type not supported.");
1555 
1556  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1557  "Reference Fully Connected: output type not supported.");
1558 
1559  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1560  "Reference Fully Connected: weights type not supported.");
1561 
1562  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1563  "Reference Fully Connected: input and output types mismatched.");
1564 
1565  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1566  "Reference Fully Connected: weights is not a supported type.");
1567 
1568  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1569  "Reference Fully Connected: input and weights types mismatched.");
1570 
1571  if (descriptor.m_BiasEnabled)
1572  {
1573  // Defined supported types for bias
1574  std::array<DataType, 5>
1575  supportedBiasTypes =
1576  {
1581  };
1582 
1583  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1584  "Reference Fully Connected: bias type not supported.");
1585 
1586  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1587  "Reference Fully Connected: bias and weight types mismatch.");
1588 
1589  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1590  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1591 
1592  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1593  "Reference Fully Connected: bias must have 1 dimension.");
1594 
1595  }
1596 
1597  return supported;
1598 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, FullyConnectedDescriptor::m_BiasEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1600 of file RefLayerSupport.cpp.

1604 {
1605  bool supported = true;
1606  std::array<DataType,7> supportedTypes =
1607  {
1614  };
1615 
1616  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1617  "Reference GatherNd: input type not supported");
1618 
1619  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1620  "Reference GatherNd: output type not supported");
1621 
1622  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1623  "Reference GatherNd: indices (input1) type not supported");
1624 
1625  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1626  "Reference GatherNd: input and output types not matching");
1627 
1628  return supported;
1629 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
const GatherDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1631 of file RefLayerSupport.cpp.

1636 {
1637  bool supported = true;
1638  std::array<DataType,7> supportedTypes =
1639  {
1646  };
1647 
1648  IgnoreUnused(descriptor);
1649  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1650  "Reference Gather: input type not supported");
1651 
1652  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1653  "Reference Gather: output type not supported");
1654 
1655  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1656  "Reference Gather: indices (input1) type not supported");
1657 
1658  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1659  "Reference Gather: input and output types not matching");
1660 
1661  return supported;
1662 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo &  input,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1664 of file RefLayerSupport.cpp.

1666 {
1667  return true;
1668 }

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const InstanceNormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1670 of file RefLayerSupport.cpp.

1674 {
1675  IgnoreUnused(descriptor);
1676  // Define supported types
1677  std::array<DataType, 3> supportedTypes =
1678  {
1681  };
1682 
1683  bool supported = true;
1684 
1685  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1686  "Reference Instance Normalization: input type not supported.");
1687 
1688  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1689  "Reference Instance Normalization: output type not supported.");
1690 
1691  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1692  "Reference Instance Normalization: input and output types mismatched.");
1693 
1694  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1695  "Reference Instance Normalization: input and output shapes have different "
1696  "num total elements.");
1697 
1698  return supported;
1699 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const L2NormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1701 of file RefLayerSupport.cpp.

1705 {
1706  IgnoreUnused(descriptor);
1707  // Define supported types
1708  std::array<DataType, 6> supportedTypes =
1709  {
1715  };
1716 
1717  bool supported = true;
1718 
1719  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1720  "Reference L2normalization: input type not supported.");
1721 
1722  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1723  "Reference L2normalization: output type not supported.");
1724 
1725  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1726  "Reference L2normalization: input and output types mismatched.");
1727 
1728  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1729  "Reference L2normalization: input and output shapes have different "
1730  "num total elements.");
1731 
1732  return supported;
1733 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType & type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor & descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmParamsInfo,
Optional< std::string & >  reasonIfUnsupported 
) const
overridevirtual

Default implementation of the ILayerSupport interface, Backends should implement this as a switch statement for each of their LayerTypes calling their specific backend implementation of IsXXXLayerSupported.

Reimplemented from LayerSupportBase.

Definition at line 61 of file RefLayerSupport.cpp.

67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
74  reasonIfUnsupported);
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
81  reasonIfUnsupported);
83  return IsBatchMatMulSupported(infos[0],
84  infos[1],
85  infos[2],
86  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
87  reasonIfUnsupported);
89  return IsBatchNormalizationSupported(infos[0],
90  infos[1],
91  infos[2],
92  infos[3],
93  infos[4],
94  infos[5],
95  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
96  (&descriptor)),
97  reasonIfUnsupported);
99  return IsBatchToSpaceNdSupported(infos[0],
100  infos[1],
101  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
102  reasonIfUnsupported);
104  return IsBroadcastToSupported(infos[0],
105  infos[1],
106  *(PolymorphicDowncast<const BroadcastToDescriptor*>(&descriptor)),
107  reasonIfUnsupported);
109  return IsComparisonSupported(infos[0],
110  infos[1],
111  infos[2],
112  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
113  reasonIfUnsupported);
114  case LayerType::Concat:
115  {
116  std::vector<const TensorInfo*> inputInfos;
117  for (uint32_t i = 0; i < (infos.size() - 1); i++)
118  {
119  inputInfos.push_back(&infos[i]);
120  }
121  return IsConcatSupported(inputInfos,
122  infos[infos.size() - 1],
123  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
124  reasonIfUnsupported);
125  }
126  case LayerType::Constant:
127  return IsConstantSupported(infos[0], reasonIfUnsupported);
129  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
131  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
133  {
134  if (infos.size() != 4)
135  {
136  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
137  "TensorInfos should be of format: {input, output, weights, biases}.");
138  }
139 
140  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
141  if (infos[3] == TensorInfo())
142  {
143  return IsConvolution2dSupported(infos[0],
144  infos[1],
145  desc,
146  infos[2],
147  EmptyOptional(),
148  reasonIfUnsupported);
149  }
150  else
151  {
152  return IsConvolution2dSupported(infos[0],
153  infos[1],
154  desc,
155  infos[2],
156  infos[3],
157  reasonIfUnsupported);
158  }
159  }
161  return IsDepthToSpaceSupported(infos[0],
162  infos[1],
163  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
164  reasonIfUnsupported);
166  {
167  if (infos.size() != 4)
168  {
169  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
170  "TensorInfos should be of format: {input, output, weights, biases}.");
171  }
172 
173  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
174  if (infos[3] == TensorInfo())
175  {
176  return IsDepthwiseConvolutionSupported(infos[0],
177  infos[1],
178  desc,
179  infos[2],
180  EmptyOptional(),
181  reasonIfUnsupported);
182  }
183  else
184  {
185  return IsDepthwiseConvolutionSupported(infos[0],
186  infos[1],
187  desc,
188  infos[2],
189  infos[3],
190  reasonIfUnsupported);
191  }
192  }
194  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
195  case LayerType::Division:
196  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
198  {
199  std::array<DataType, 7> supportedTypes =
200  {
207  };
208 
209  bool supported = true;
210  supported &= CheckSupportRule(TypeAnyOf(infos[0], supportedTypes), reasonIfUnsupported,
211  "Reference elementwise unary: input type not supported");
212 
213  supported &= CheckSupportRule(TypeAnyOf(infos[1], supportedTypes), reasonIfUnsupported,
214  "Reference elementwise unary: input type not supported");
215 
216  supported &= CheckSupportRule(TypeAnyOf(infos[2], supportedTypes), reasonIfUnsupported,
217  "Reference elementwise unary: output type not supported");
218 
219  supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[1]), reasonIfUnsupported,
220  "Reference elementwise unary: input types not matching");
221 
222  supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[2]), reasonIfUnsupported,
223  "Reference elementwise unary: input and output types not matching");
224 
225  return supported;
226  }
228  return IsElementwiseUnarySupported(infos[0],
229  infos[1],
230  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
231  reasonIfUnsupported);
232  case LayerType::Fill:
233  return IsFillSupported(infos[0],
234  infos[1],
235  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
236  reasonIfUnsupported);
237  case LayerType::Floor:
238  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
240  return IsFullyConnectedSupported(infos[0],
241  infos[1],
242  infos[2],
243  infos[3],
244  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
245  reasonIfUnsupported);
246  case LayerType::Gather:
247  return IsGatherSupported(infos[0],
248  infos[1],
249  infos[2],
250  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
251  reasonIfUnsupported);
252  case LayerType::GatherNd:
253  return IsGatherNdSupported(infos[0],
254  infos[1],
255  infos[2],
256  reasonIfUnsupported);
257  case LayerType::Input:
258  return IsInputSupported(infos[0], reasonIfUnsupported);
260  return IsInstanceNormalizationSupported(infos[0],
261  infos[1],
262  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
263  (&descriptor)),
264  reasonIfUnsupported);
266  return IsL2NormalizationSupported(infos[0],
267  infos[1],
268  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
269  reasonIfUnsupported);
271  return IsLogicalBinarySupported(infos[0],
272  infos[1],
273  infos[2],
274  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
275  reasonIfUnsupported);
277  return IsLogSoftmaxSupported(infos[0],
278  infos[1],
279  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
280  reasonIfUnsupported);
281  case LayerType::Lstm:
282  return IsLstmSupported(infos[0],
283  infos[1],
284  infos[2],
285  infos[3],
286  infos[4],
287  infos[5],
288  infos[6],
289  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
290  lstmParamsInfo.value(),
291  reasonIfUnsupported);
292  case LayerType::QLstm:
293  return IsQLstmSupported(infos[0],
294  infos[1],
295  infos[2],
296  infos[3],
297  infos[4],
298  infos[5],
299  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
300  lstmParamsInfo.value(),
301  reasonIfUnsupported);
302  case LayerType::Maximum:
303  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
304  case LayerType::Mean:
305  return IsMeanSupported(infos[0],
306  infos[1],
307  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
308  reasonIfUnsupported);
309  case LayerType::Minimum:
310  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
312  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
314  return IsNormalizationSupported(infos[0],
315  infos[1],
316  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
317  reasonIfUnsupported);
318  case LayerType::Output:
319  return IsOutputSupported(infos[0], reasonIfUnsupported);
320  case LayerType::Pad:
321  return IsPadSupported(infos[0],
322  infos[1],
323  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
324  reasonIfUnsupported);
325  case LayerType::Permute:
326  return IsPermuteSupported(infos[0],
327  infos[1],
328  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
329  reasonIfUnsupported);
331  return IsPooling2dSupported(infos[0],
332  infos[1],
333  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
334  reasonIfUnsupported);
335  case LayerType::Prelu:
336  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
337  case LayerType::Quantize:
338  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
339  case LayerType::Reshape:
340  return IsReshapeSupported(infos[0],
341  infos[1],
342  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
343  reasonIfUnsupported);
344  case LayerType::Resize:
345  return IsResizeSupported(infos[0],
346  infos[1],
347  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
348  reasonIfUnsupported);
350  return IsReverseV2Supported(infos[0],
351  infos[1],
352  infos[2],
353  reasonIfUnsupported);
354  case LayerType::Reduce:
355  return IsReduceSupported(infos[0],
356  infos[1],
357  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
358  reasonIfUnsupported);
360  return IsScatterNdSupported(infos[0],
361  infos[1],
362  infos[2],
363  infos[3],
364  *(PolymorphicDowncast<const ScatterNdDescriptor*>(&descriptor)),
365  reasonIfUnsupported);
366  case LayerType::Slice:
367  return IsSliceSupported(infos[0],
368  infos[1],
369  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
370  reasonIfUnsupported);
371  case LayerType::Softmax:
372  return IsSoftmaxSupported(infos[0],
373  infos[1],
374  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
375  reasonIfUnsupported);
377  return IsSpaceToBatchNdSupported(infos[0],
378  infos[1],
379  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
380  reasonIfUnsupported);
382  return IsSpaceToDepthSupported(infos[0],
383  infos[1],
384  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
385  reasonIfUnsupported);
386  case LayerType::Splitter:
387  {
388  std::vector<TensorInfo> outputInfos;
389  for (uint32_t i = 1; i < infos.size(); i++)
390  {
391  outputInfos.push_back(infos[i]);
392  }
393  return IsSplitterSupported(infos[0],
394  {outputInfos.begin(), outputInfos.end()},
395  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
396  reasonIfUnsupported);
397  }
398  case LayerType::Stack:
399  {
400  std::vector<const TensorInfo*> inputInfos;
401  for (uint32_t i = 0; i < infos.size() - 1; i++)
402  {
403  inputInfos.push_back(&infos[i]);
404  }
405  return IsStackSupported(inputInfos,
406  infos[infos.size() - 1],
407  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
408  reasonIfUnsupported);
409  }
411  return IsStridedSliceSupported(infos[0],
412  infos[1],
413  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
414  reasonIfUnsupported);
416  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
417  case LayerType::Tile:
418  return IsTileSupported(infos[0],
419  infos[1],
420  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
421  reasonIfUnsupported);
423  return IsTransposeSupported(infos[0],
424  infos[1],
425  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
426  reasonIfUnsupported);
428  {
429  if (infos.size() != 4)
430  {
431  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
432  "TensorInfos should be of format: {input, output, weights, biases}.");
433  }
434 
435  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
436  if (infos[3] == TensorInfo())
437  {
438  return IsTransposeConvolution2dSupported(infos[0],
439  infos[1],
440  desc,
441  infos[2],
442  EmptyOptional(),
443  reasonIfUnsupported);
444  }
445  else
446  {
447  return IsTransposeConvolution2dSupported(infos[0],
448  infos[1],
449  desc,
450  infos[2],
451  infos[3],
452  reasonIfUnsupported);
453  }
454  }
455  case LayerType::Cast:
456  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
458  return IsChannelShuffleSupported(infos[0],
459  infos[1],
460  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
461  reasonIfUnsupported);
463  {
464  if (infos.size() != 4)
465  {
466  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
467  "TensorInfos should be of format: {input, output, weights, biases}.");
468  }
469 
470  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
471  if (infos[3] == TensorInfo())
472  {
473  return IsConvolution3dSupported(infos[0],
474  infos[1],
475  desc,
476  infos[2],
477  EmptyOptional(),
478  reasonIfUnsupported);
479  }
480  else
481  {
482  return IsConvolution3dSupported(infos[0],
483  infos[1],
484  desc,
485  infos[2],
486  infos[3],
487  reasonIfUnsupported);
488  }
489  }
490  case LayerType::Debug:
491  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
493  return IsDetectionPostProcessSupported(infos[0],
494  infos[1],
495  infos[2],
496  infos[3],
497  infos[4],
498  infos[5],
499  infos[6],
500  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
501  (&descriptor)),
502  reasonIfUnsupported);
504  return IsFakeQuantizationSupported(infos[0],
505  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
506  reasonIfUnsupported);
507  case LayerType::MemCopy:
508  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
509  case LayerType::Rank:
510  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
511  case LayerType::Shape:
512  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
514  {
515  if (infos.size() != 6)
516  {
517  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
518  "should be of format: {input, outputStateIn, cellStateIn, "
519  "hiddenStateOutputVal, cellStateOutputVal, output}");
520  }
521  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
523  infos[1],
524  infos[2],
525  infos[3],
526  infos[4],
527  infos[5],
528  desc,
529  lstmParamsInfo.value(),
530  reasonIfUnsupported);
531  }
533  return IsPooling3dSupported(infos[0],
534  infos[1],
535  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
536  reasonIfUnsupported);
537  case LayerType::Map:
538  return true;
539  case LayerType::Unmap:
540  return true;
542  return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
543  case LayerType::Merge:
544  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
547  infos[1],
548  infos[2],
549  infos[3],
550  infos[4],
551  quantizedLstmInputParamsInfo.value(),
552  reasonIfUnsupported);
553  default:
554  // layers not supported in reference by default:
555  // precompiled, standin, switch, fused
556  return false;
557  }
558 }

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::BroadcastTo, armnn::Cast, armnn::ChannelShuffle, armnn::CheckSupportRule(), armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Float16, armnn::Float32, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::Input, armnn::InstanceNormalization, RefLayerSupport::IsActivationSupported(), RefLayerSupport::IsAdditionSupported(), RefLayerSupport::IsArgMinMaxSupported(), RefLayerSupport::IsBatchMatMulSupported(), RefLayerSupport::IsBatchNormalizationSupported(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsBroadcastToSupported(), RefLayerSupport::IsCastSupported(), RefLayerSupport::IsChannelShuffleSupported(), RefLayerSupport::IsComparisonSupported(), RefLayerSupport::IsConcatSupported(), RefLayerSupport::IsConstantSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDebugSupported(), RefLayerSupport::IsDepthToSpaceSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), RefLayerSupport::IsDequantizeSupported(), RefLayerSupport::IsDetectionPostProcessSupported(), RefLayerSupport::IsDivisionSupported(), RefLayerSupport::IsElementwiseUnarySupported(), RefLayerSupport::IsFakeQuantizationSupported(), RefLayerSupport::IsFillSupported(), RefLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), RefLayerSupport::IsGatherNdSupported(), RefLayerSupport::IsGatherSupported(), 
RefLayerSupport::IsInputSupported(), RefLayerSupport::IsInstanceNormalizationSupported(), RefLayerSupport::IsL2NormalizationSupported(), RefLayerSupport::IsLogicalBinarySupported(), RefLayerSupport::IsLogSoftmaxSupported(), RefLayerSupport::IsLstmSupported(), RefLayerSupport::IsMaximumSupported(), RefLayerSupport::IsMeanSupported(), RefLayerSupport::IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), RefLayerSupport::IsMinimumSupported(), RefLayerSupport::IsMultiplicationSupported(), RefLayerSupport::IsNormalizationSupported(), RefLayerSupport::IsOutputSupported(), RefLayerSupport::IsPadSupported(), RefLayerSupport::IsPermuteSupported(), RefLayerSupport::IsPooling2dSupported(), RefLayerSupport::IsPooling3dSupported(), RefLayerSupport::IsPreluSupported(), RefLayerSupport::IsQLstmSupported(), LayerSupportBase::IsQuantizedLstmSupported(), RefLayerSupport::IsQuantizeSupported(), RefLayerSupport::IsRankSupported(), RefLayerSupport::IsReduceSupported(), RefLayerSupport::IsReshapeSupported(), RefLayerSupport::IsResizeSupported(), RefLayerSupport::IsReverseV2Supported(), RefLayerSupport::IsScatterNdSupported(), RefLayerSupport::IsShapeSupported(), RefLayerSupport::IsSliceSupported(), RefLayerSupport::IsSoftmaxSupported(), RefLayerSupport::IsSpaceToBatchNdSupported(), RefLayerSupport::IsSpaceToDepthSupported(), RefLayerSupport::IsSplitterSupported(), RefLayerSupport::IsStackSupported(), RefLayerSupport::IsStridedSliceSupported(), RefLayerSupport::IsSubtractionSupported(), RefLayerSupport::IsTileSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), RefLayerSupport::IsTransposeSupported(), RefLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, 
armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QLstm, armnn::QSymmS16, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::ReverseV2, armnn::ScatterNd, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Tile, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const LogicalBinaryDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1735 of file RefLayerSupport.cpp.

1740 {
1741  IgnoreUnused(descriptor);
1742 
1743  std::array<DataType, 1> supportedTypes =
1744  {
1746  };
1747 
1748  bool supported = true;
1749  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1750  "Reference LogicalBinary: input 0 type not supported");
1751  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1752  "Reference LogicalBinary: input 1 type not supported");
1753 
1754  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1755  "Reference LogicalBinary: input and output types do not match");
1756 
1757  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1758  "Reference LogicalBinary: shapes are not suitable for implicit broadcast.");
1759 
1760  return supported;
1761 }

References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const LogSoftmaxDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1763 of file RefLayerSupport.cpp.

1767 {
1768  IgnoreUnused(descriptor);
1769 
1770  std::array<DataType, 3> supportedTypes =
1771  {
1774  };
1775 
1776  bool supported = true;
1777  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1778  "Reference LogSoftmax: input type not supported");
1779 
1780  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1781  "Reference LogSoftmax: output type not supported");
1782 
1783  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1784  "Reference LogSoftmax: input and output types do not match");
1785 
1786  return supported;
1787 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo & input,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateIn,
const TensorInfo & scratchBuffer,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const LstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1789 of file RefLayerSupport.cpp.

1799 {
1800  IgnoreUnused(descriptor);
1801  IgnoreUnused(paramsInfo);
1802 
1803  bool supported = true;
1804 
1805  std::array<DataType,3> supportedTypes = {
1808  };
1809 
1810  // check inputs and outputs
1811  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1812  "Reference Lstm: input is not a supported type.");
1813  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1814  "Reference Lstm: input and outputStateIn types are mismatched");
1815  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1816  "Reference Lstm: input and cellStateIn types are mismatched");
1817  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1818  "Reference Lstm: input and scratchBuffer types are mismatched");
1819  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1820  "Reference Lstm: input and outputStateOut types are mismatched");
1821  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1822  "Reference Lstm: input and cellStateOut types are mismatched");
1823 
1824  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1825  "Reference Lstm: input and output types are mismatched");
1826  // check layer parameters
1827  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1828  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1829  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1830  "Reference Lstm: input and InputToCellWeights types are mismatched");
1831  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1832  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1833  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1834  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1835  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1836  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1837  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1838  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1839  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1840  "Reference Lstm: input and ForgetGateBias types are mismatched");
1841  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1842  "Reference Lstm: input and CellBias types are mismatched");
1843  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1844  "Reference Lstm: input and OutputGateBias types are mismatched");
1845  if (!descriptor.m_CifgEnabled)
1846  {
1847  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1848  "Reference Lstm: input and InputToInputWeights types are mismatched");
1849  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1850  reasonIfUnsupported,
1851  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1852  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1853  "Reference Lstm: input and InputGateBias types are mismatched");
1854  if (descriptor.m_PeepholeEnabled)
1855  {
1856  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1857  reasonIfUnsupported,
1858  "Reference Lstm: input and CellToInputWeights types are mismatched");
1859  }
1860  }
1861  if (descriptor.m_PeepholeEnabled)
1862  {
1863  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1864  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1865  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1866  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1867  }
1868  if (descriptor.m_ProjectionEnabled)
1869  {
1870  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1871  "Reference Lstm: input and mProjectionWeights types are mismatched");
1872  if (paramsInfo.m_ProjectionBias != nullptr)
1873  {
1874  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1875  "Reference Lstm: input and ProjectionBias types are mismatched");
1876  }
1877  }
1878  if (descriptor.m_LayerNormEnabled)
1879  {
1880  if (!descriptor.m_CifgEnabled)
1881  {
1882  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1883  reasonIfUnsupported,
1884  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1885  }
1886  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1887  reasonIfUnsupported,
1888  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1889  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1890  reasonIfUnsupported,
1891  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1892  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1893  reasonIfUnsupported,
1894  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1895  }
1896 
1897  return supported;
1898 }

References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1900 of file RefLayerSupport.cpp.

1904 {
1905  bool supported = true;
1906 
1907  std::array<DataType,7> supportedTypes = {
1914  };
1915 
1916  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1917  "Reference maximum: input 0 is not a supported type.");
1918 
1919  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1920  "Reference maximum: input 1 is not a supported type.");
1921 
1922  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1923  "Reference maximum: output is not a supported type.");
1924 
1925  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1926  "Reference maximum: input 0 and Input 1 types are mismatched");
1927 
1928  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1929  "Reference maximum: input and output types are mismatched");
1930 
1931  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1932  "Reference maximum: shapes are not suitable for implicit broadcast.");
1933 
1934  return supported;
1935 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo & input,
const TensorInfo & output,
const MeanDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1937 of file RefLayerSupport.cpp.

1941 {
1942  bool supported = true;
1943  std::string meanLayerStr = "Mean";
1944  std::string outputTensorStr = "output";
1945 
1946  std::array<DataType,6> supportedTypes =
1947  {
1953  };
1954 
1955  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1956  "Reference Mean: input type not supported.");
1957 
1958  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1959  "Reference Mean: input and output types are mismatched");
1960 
1961  if (descriptor.m_KeepDims)
1962  {
1963  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1964  reasonIfUnsupported,
1965  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1966  output.GetNumDimensions(),
1967  meanLayerStr, outputTensorStr).data());
1968  }
1969  else if (descriptor.m_Axis.empty())
1970  {
1971  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1972  reasonIfUnsupported,
1973  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1974  meanLayerStr, outputTensorStr).data());
1975  }
1976  else
1977  {
1978  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1979 
1980  if (outputDim > 0)
1981  {
1982  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1983  reasonIfUnsupported,
1984  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1985  meanLayerStr, outputTensorStr).data());
1986  }
1987  else
1988  {
1989  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1990  reasonIfUnsupported,
1991  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1992  meanLayerStr, outputTensorStr).data());
1993  }
1994  }
1995 
1996  return supported;
1997 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1999 of file RefLayerSupport.cpp.

2002 {
2003  bool supported = true;
2004 
2005  std::array<DataType,7> supportedTypes =
2006  {
2014  };
2015 
2016  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2017  "Reference MemCopy: input type not supported");
2018 
2019  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2020  "Reference MemCopy: output type not supported");
2021 
2022  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2023  "Reference MemCopy: input and output types are mismatched");
2024 
2025  return supported;
2026 }

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2028 of file RefLayerSupport.cpp.

2032 {
2033  bool supported = true;
2034 
2035  std::array<DataType,7> supportedTypes = {
2042  };
2043 
2044  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2045  "Reference minimum: input 0 is not a supported type.");
2046 
2047  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2048  "Reference minimum: input 1 is not a supported type.");
2049 
2050  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2051  "Reference minimum: output is not a supported type.");
2052 
2053  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2054  "Reference minimum: input 0 and Input 1 types are mismatched");
2055 
2056  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2057  "Reference minimum: input and output types are mismatched");
2058 
2059  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2060  "Reference minimum: shapes are not suitable for implicit broadcast.");
2061 
2062  return supported;
2063 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2065 of file RefLayerSupport.cpp.

2069 {
2070  bool supported = true;
2071 
2072  std::array<DataType,7> supportedTypes = {
2079  };
2080 
2081  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2082  "Reference multiplication: input 0 is not a supported type.");
2083 
2084  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2085  "Reference multiplication: input 1 is not a supported type.");
2086 
2087  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2088  "Reference multiplication: output is not a supported type.");
2089 
2090  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2091  "Reference multiplication: input 0 and Input 1 types are mismatched");
2092 
2093  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2094  "Reference multiplication: input and output types are mismatched");
2095 
2096  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2097  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2098 
2099  return supported;
2100 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const NormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2102 of file RefLayerSupport.cpp.

2106 {
2107  IgnoreUnused(descriptor);
2108 
2109  // Define supported types
2110  std::array<DataType, 6> supportedTypes =
2111  {
2117  };
2118 
2119  bool supported = true;
2120 
2121  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2122  "Reference normalization: input type not supported.");
2123 
2124  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2125  "Reference normalization: output type not supported.");
2126 
2127  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2128  "Reference normalization: input and output shapes have different "
2129  "num total elements.");
2130 
2131  return supported;
2132 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2134 of file RefLayerSupport.cpp.

2136 {
2137  return true;
2138 }

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const PadDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2140 of file RefLayerSupport.cpp.

2144 {
2145  IgnoreUnused(descriptor);
2146  bool supported = true;
2147 
2148  // Define supported output and inputs types.
2149  std::array<DataType,6> supportedTypes =
2150  {
2156  };
2157 
2158  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2159  "Reference pad: input is not a supported type.");
2160 
2161  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2162  "Reference pad: output is not a supported type.");
2163 
2164  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2165  "Reference pad: input and output types are mismatched.");
2166 
2167  return supported;
2168 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const PermuteDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2170 of file RefLayerSupport.cpp.

2174 {
2175  IgnoreUnused(descriptor);
2176  bool supported = true;
2177 
2178  // Define supported output and inputs types.
2179  std::array<DataType, 6> supportedTypes =
2180  {
2187  };
2188 
2189  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2190  "Reference permute: input is not a supported type.");
2191 
2192  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2193  "Reference permute: output is not a supported type.");
2194 
2195  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2196  "Reference permute: input and output types are mismatched.");
2197 
2198  return supported;
2199 }

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Pooling2dDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2201 of file RefLayerSupport.cpp.

2205 {
2206  IgnoreUnused(descriptor);
2207  bool supported = true;
2208 
2209  // Define supported output and inputs types.
2210  std::array<DataType,6> supportedTypes =
2211  {
2217  };
2218 
2219  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2220  "Reference poolind2d: input is not a supported type.");
2221 
2222  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2223  "Reference poolind2d: output is not a supported type.");
2224 
2225  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2226  "Reference poolind2d: input and output types are mismatched.");
2227 
2228  return supported;
2229 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Pooling3dDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2231 of file RefLayerSupport.cpp.

2235 {
2236  IgnoreUnused(descriptor);
2237  bool supported = true;
2238 
2239  // Define supported output and inputs types.
2240  std::array<DataType,6> supportedTypes =
2241  {
2247  };
2248 
2249  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2250  "Reference poolind3d: input is not a supported type.");
2251 
2252  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2253  "Reference poolind3d: output is not a supported type.");
2254 
2255  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2256  "Reference poolind3d: input and output types are mismatched.");
2257 
2258  return supported;
2259 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo &  input,
const TensorInfo &  alpha,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2777 of file RefLayerSupport.cpp.

2781 {
2782  bool supported = true;
2783 
2784  std::array<DataType, 6> supportedTypes
2785  {
2791  };
2792 
2793  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2794  "PReLU: input is not a supported type.");
2795 
2796  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2797  "PReLU: alpha is not a supported type.");
2798 
2799  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2800  "PReLU: output is not a supported type.");
2801 
2802  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2803  "PReLU: input, alpha and output types are mismatched");
2804 
2805  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2806  "PReLU: shapes are not suitable for implicit broadcast");
2807 
2808  return supported;
2809 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo &  input,
const TensorInfo &  previousOutputIn,
const TensorInfo &  previousCellStateIn,
const TensorInfo &  outputStateOut,
const TensorInfo &  cellStateOut,
const TensorInfo &  output,
const QLstmDescriptor &  descriptor,
const LstmInputParamsInfo &  paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2262 of file RefLayerSupport.cpp.

2271 {
2272  IgnoreUnused(input);
2273  IgnoreUnused(previousOutputIn);
2274  IgnoreUnused(previousCellStateIn);
2275  IgnoreUnused(outputStateOut);
2276  IgnoreUnused(cellStateOut);
2277  IgnoreUnused(output);
2278  IgnoreUnused(descriptor);
2279  IgnoreUnused(paramsInfo);
2280 
2281  IgnoreUnused(reasonIfUnsupported);
2282 
2283  return true;
2284 }

References armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2286 of file RefLayerSupport.cpp.

2289 {
2290  bool supported = true;
2291 
2292  // Define supported input types.
2293  std::array<DataType,7> supportedInputTypes = {
2300  };
2301 
2302  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2303  "Reference quantize: input type not supported.");
2304 
2305  // Define supported output types.
2306  std::array<DataType,4> supportedOutputTypes = {
2311  };
2312  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2313  "Reference quantize: output type not supported.");
2314 
2315  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2316  "Reference quantize: input and output shapes have different num total elements.");
2317 
2318  return supported;
2319 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2321 of file RefLayerSupport.cpp.

2324 {
2325  IgnoreUnused(input);
2326  // Define supported output types.
2327  std::array<DataType,1> supportedOutputTypes =
2328  {
2330  };
2331 
2332  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2333  "Reference rank: input type not supported.");
2334 }

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReduceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2336 of file RefLayerSupport.cpp.

2340 {
2341  IgnoreUnused(descriptor);
2342  bool supported = true;
2343  std::array<DataType,7> supportedTypes =
2344  {
2351  };
2352 
2353  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2354  "Reference Reduce: input type not supported");
2355 
2356  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2357  "Reference Reduce: output type not supported");
2358 
2359  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2360  "Reference Reduce: input and output types not matching");
2361 
2362  return supported;
2363 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReshapeDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2365 of file RefLayerSupport.cpp.

2369 {
2370  IgnoreUnused(output);
2371  IgnoreUnused(descriptor);
2372  // Define supported output types.
2373  std::array<DataType,8> supportedOutputTypes =
2374  {
2383  };
2384 
2385  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2386  "Reference reshape: input type not supported.");
2387 }

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ResizeDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2389 of file RefLayerSupport.cpp.

2393 {
2394  IgnoreUnused(descriptor);
2395  bool supported = true;
2396  std::array<DataType,7> supportedTypes =
2397  {
2405  };
2406 
2407  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2408  "Reference Resize: input type not supported");
2409 
2410  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2411  "Reference Resize: output type not supported");
2412 
2413  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2414  "Reference Resize: input and output types not matching");
2415 
2416  return supported;
2417 }

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsReverseV2Supported()

bool IsReverseV2Supported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2419 of file RefLayerSupport.cpp.

2423 {
2424  bool supported = true;
2425  // ReverseV2 is data type agnostic so it can support all the types in the Reference backend
2426  std::array<DataType,8> supportedTypes =
2427  {
2436  };
2437 
2438  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2439  "Reference ReverseV2: input0 type not supported");
2440 
2441  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2442  "Reference ReverseV2: output type not supported");
2443 
2444  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2445  "Reference ReverseV2: input0 and output types not matching");
2446 
2447  std::array<DataType,6> input2SupportedTypes =
2448  {
2450  };
2451 
2452  supported &= CheckSupportRule(TypeAnyOf(input1, input2SupportedTypes), reasonIfUnsupported,
2453  "Reference ReverseV2: input1 type not supported");
2454 
2455  return supported;
2456 }

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsScatterNdSupported()

bool IsScatterNdSupported ( const TensorInfo &  input,
const TensorInfo &  indices,
const TensorInfo &  updates,
const TensorInfo &  output,
const ScatterNdDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2458 of file RefLayerSupport.cpp.

2464 {
2465  IgnoreUnused(descriptor);
2466 
2467  bool supported = true;
2468 
2469  std::array<DataType, 7> supportedTypes
2470  {
2478  };
2479 
2480  std::array<DataType, 1> indicesSupportedTypes =
2481  {
2483  };
2484 
2485  supported &= CheckSupportRule(TypeAnyOf(indices, indicesSupportedTypes), reasonIfUnsupported,
2486  "ScatterNd: indices type not supported.");
2487 
2488  supported &= CheckSupportRule(TypeAnyOf(updates, supportedTypes), reasonIfUnsupported,
2489  "ScatterNd: updates type not supported.");
2490 
2491  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2492  "ScatterNd: output type not supported");
2493 
2494  supported &= CheckSupportRule(TypesAreEqual(updates, output), reasonIfUnsupported,
2495  "ScatterNd: input and updates types are mismatched");
2496 
2497  if (descriptor.m_InputEnabled)
2498  {
2499  // If the input slot is enabled, we have the input tensor in this slot
2500  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2501  "ScatterNd: input type not supported.");
2502 
2503  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2504  "ScatterNd: input and output types are mismatched");
2505  }
2506  else
2507  {
2508  // If the input slot is not enabled, we have the shape tensor in this slot
2509  supported &= CheckSupportRule(TypeAnyOf(input, indicesSupportedTypes), reasonIfUnsupported,
2510  "ScatterNd: shape type not supported.");
2511  }
2512 
2513  return supported;
2514 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ScatterNdDescriptor::m_InputEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsShapeSupported()

bool IsShapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2516 of file RefLayerSupport.cpp.

2519 {
2520  IgnoreUnused(input);
2521  bool supported = true;
2522 
2523  std::array<DataType, 1> supportedTypes =
2524  {
2526  };
2527 
2528  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2529  "Reference Shape: output type not supported");
2530 
2531  return supported;
2532 }

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SliceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2534 of file RefLayerSupport.cpp.

2538 {
2539  IgnoreUnused(descriptor);
2540  bool supported = true;
2541 
2542  std::array<DataType, 5> supportedTypes =
2543  {
2549  };
2550 
2551  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2552  "Reference Slice: input type not supported");
2553 
2554  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2555  "Reference Slice: output type not supported");
2556 
2557  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2558  "Reference Slice: input and output types are mismatched");
2559 
2560  return supported;
2561 }

References armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SoftmaxDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2563 of file RefLayerSupport.cpp.

2567 {
2568  IgnoreUnused(descriptor);
2569  bool supported = true;
2570  std::array<DataType,7> supportedTypes =
2571  {
2578  };
2579 
2580  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2581  "Reference Softmax: output type not supported");
2582 
2583  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2584  "Reference Softmax: input type not supported");
2585 
2586  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2587  "Reference Softmax: input type not supported");
2588 
2589  return supported;
2590 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToBatchNdDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2592 of file RefLayerSupport.cpp.

2596 {
2597  IgnoreUnused(descriptor);
2598  bool supported = true;
2599  std::array<DataType,6> supportedTypes =
2600  {
2606  };
2607 
2608  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2609  "Reference SpaceToBatchNd: input type not supported");
2610 
2611  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2612  "Reference SpaceToBatchNd: output type not supported");
2613 
2614  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2615  "Reference SpaceToBatchNd: input and output types are mismatched");
2616 
2617  return supported;
2618 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToDepthDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2620 of file RefLayerSupport.cpp.

2624 {
2625 
2626  IgnoreUnused(descriptor);
2627  bool supported = true;
2628 
2629  std::array<DataType,6> supportedTypes =
2630  {
2636  };
2637 
2638  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2639  "Reference SpaceToDepth: input type not supported");
2640 
2641  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2642  "Reference SpaceToDepth: output type not supported");
2643 
2644  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2645  "Reference SpaceToDepth: input and output types are mismatched");
2646 
2647  return supported;
2648 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo &  input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2650 of file RefLayerSupport.cpp.

2654 {
2655  IgnoreUnused(descriptor);
2656  bool supported = true;
2657  std::array<DataType,6> supportedTypes =
2658  {
2664  };
2665 
2666  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2667  "Reference splitter: output type not supported");
2668  for (const TensorInfo& output : outputs)
2669  {
2670  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2671  "Reference splitter: input type not supported");
2672 
2673  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2674  "Reference splitter: input and output types mismatched.");
2675  }
2676 
2677  return supported;
2678 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo * > &  inputs,
const TensorInfo &  output,
const StackDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2680 of file RefLayerSupport.cpp.

2684 {
2685  IgnoreUnused(descriptor);
2686 
2687  bool supported = true;
2688  std::array<DataType,7> supportedTypes =
2689  {
2696  };
2697 
2698  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2699  "Reference stack: output type not supported");
2700  for (const TensorInfo* input : inputs)
2701  {
2702  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2703  "Reference stack: input type not supported");
2704 
2705  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2706  "Reference stack: input and output types mismatched.");
2707  }
2708 
2709  return supported;
2710 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const StridedSliceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2712 of file RefLayerSupport.cpp.

2716 {
2717  IgnoreUnused(descriptor);
2718  bool supported = true;
2719 
2720  std::array<DataType,5> supportedTypes =
2721  {
2726  };
2727 
2728  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2729  "Reference StridedSlice: input type not supported");
2730 
2731  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2732  "Reference StridedSlice: output type not supported");
2733 
2734  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2735  "Reference StridedSlice: input and output types are mismatched");
2736 
2737  return supported;
2738 }

References armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2740 of file RefLayerSupport.cpp.

2744 {
2745  bool supported = true;
2746 
2747  std::array<DataType,7> supportedTypes = {
2754  };
2755 
2756  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2757  "Reference subtraction: input 0 is not a supported type.");
2758 
2759  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2760  "Reference subtraction: input 1 is not a supported type.");
2761 
2762  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2763  "Reference subtraction: output is not a supported type.");
2764 
2765  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2766  "Reference subtraction: input 0 and Input 1 types are mismatched");
2767 
2768  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2769  "Reference subtraction: input and output types are mismatched");
2770 
2771  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2772  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2773 
2774  return supported;
2775 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsTileSupported()

bool IsTileSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TileDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2811 of file RefLayerSupport.cpp.

2815 {
2816  IgnoreUnused(descriptor);
2817 
2818  bool supported = true;
2819 
2820  std::array<DataType, 7> supportedTypes
2821  {
2829  };
2830 
2831  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2832  "Tile: input type not supported.");
2833 
2834  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2835  "Tile: output type not supported");
2836 
2837  return supported;
2838 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2840 of file RefLayerSupport.cpp.

2846 {
2847  IgnoreUnused(descriptor);
2848  bool supported = true;
2849 
2850  std::array<DataType,7> supportedTypes =
2851  {
2858  };
2859 
2860  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2861  "Reference TransposeConvolution2d: input is not a supported type.");
2862 
2863  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2864  "Reference TransposeConvolution2d: output is not a supported type.");
2865 
2866  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2867  "Reference TransposeConvolution2d: input and output types mismatched.");
2868 
2869 
2870  const DataType inputType = input.GetDataType();
2871  if (IsQuantized8BitType(inputType))
2872  {
2873  std::array<DataType, 3> supportedWeightTypes =
2874  {
2878  };
2879 
2880  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2881  "Reference TransposeConvolution2d: weights type not supported for "
2882  "quantized input.");
2883  }
2884  else
2885  {
2886  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2887  "Reference TransposeConvolution2d: weights is not a supported type.");
2888 
2889  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2890  "Reference TransposeConvolution2d: input and weights types mismatched.");
2891  }
2892 
2893  if (biases.has_value())
2894  {
2895  std::array<DataType,4> biasesSupportedTypes =
2896  {
2900  };
2901  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2902  "Reference TransposeConvolution2d: biases is not a supported type.");
2903  }
2904 
2905  return supported;
2906 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2908 of file RefLayerSupport.cpp.

2912 {
2913  IgnoreUnused(descriptor);
2914  bool supported = true;
2915 
2916  // Define supported output and inputs types.
2917  std::array<DataType, 6> supportedTypes =
2918  {
2925  };
2926 
2927  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2928  "Reference transpose: input is not a supported type.");
2929 
2930  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2931  "Reference transpose: output is not a supported type.");
2932 
2933  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2934  "Reference transpose: input and output types are mismatched.");
2935 
2936  return supported;
2937 }

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo &  input,
const TensorInfo &  outputStateIn,
const TensorInfo &  cellStateIn,
const TensorInfo &  outputStateOut,
const TensorInfo &  cellStateOut,
const TensorInfo &  output,
const UnidirectionalSequenceLstmDescriptor &  descriptor,
const LstmInputParamsInfo &  paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 2939 of file RefLayerSupport.cpp.

2949 {
2950  IgnoreUnused(descriptor);
2951  IgnoreUnused(paramsInfo);
2952  IgnoreUnused(outputStateIn);
2953  IgnoreUnused(cellStateIn);
2954  IgnoreUnused(outputStateOut);
2955  IgnoreUnused(cellStateOut);
2956  bool supported = true;
2957 
2958  std::array<DataType, 2> supportedTypes =
2959  {
2962  };
2963 
2964  std::array<DataType, 2> supportedWeightTypes =
2965  {
2968  };
2969 
2970  std::array<DataType, 3> supportedBiasTypes =
2971  {
2975  };
2976 
2977  // check inputs and outputs
2978  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2979  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2980  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2981  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2982 
2983  // check layer parameters
2984  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2985  reasonIfUnsupported,
2986  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2987  "is not a supported type.");
2988  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2989  reasonIfUnsupported,
2990  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2991  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2992  reasonIfUnsupported,
2993  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2994  "is not a supported type.");
2995  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2996  reasonIfUnsupported,
2997  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2998  "is not a supported type.");
2999  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
3000  reasonIfUnsupported,
3001  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
3002  "is not a supported type.");
3003  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
3004  reasonIfUnsupported,
3005  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
3006  "is not a supported type.");
3007 
3008  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
3009  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
3010  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
3011  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
3012  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
3013  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
3014  if (!descriptor.m_CifgEnabled)
3015  {
3016  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
3017  reasonIfUnsupported,
3018  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
3019  "is not a supported type.");
3020  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
3021  reasonIfUnsupported,
3022  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
3023  "is not a supported type.");
3024  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
3025  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
3026  if (descriptor.m_PeepholeEnabled)
3027  {
3028  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
3029  reasonIfUnsupported,
3030  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
3031  "is not a supported type.");
3032  }
3033  }
3034  if (descriptor.m_PeepholeEnabled)
3035  {
3036  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
3037  reasonIfUnsupported,
3038  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
3039  "is not a supported type.");
3040  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
3041  reasonIfUnsupported,
3042  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
3043  "is not a supported type.");
3044  }
3045  if (descriptor.m_ProjectionEnabled)
3046  {
3047  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
3048  reasonIfUnsupported,
3049  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
3050  "is not a supported type.");
3051  if (paramsInfo.m_ProjectionBias != nullptr)
3052  {
3053  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
3054  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
3055  "are mismatched");
3056  }
3057  }
3058  if (descriptor.m_LayerNormEnabled)
3059  {
3060  if (!descriptor.m_CifgEnabled)
3061  {
3062  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
3063  reasonIfUnsupported,
3064  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
3065  "is not a supported type.");
3066  }
3067  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
3068  reasonIfUnsupported,
3069  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
3070  "is not a supported type.");
3071  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
3072  reasonIfUnsupported,
3073  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
3074  "is not a supported type.");
3075  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
3076  reasonIfUnsupported,
3077  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
3078  "is not a supported type.");
3079  }
3080 
3081  return supported;
3082 }

References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, armnn::QAsymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().


The documentation for this class was generated from the following files:
armnn::RefLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1789
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::RefLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1235
armnn::DataType::Boolean
@ Boolean
armnn::LayerType::Permute
@ Permute
armnn::RefLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2231
armnn::LayerType::Splitter
@ Splitter
armnn::RefLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2419
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::RefLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2262
armnn::IsQuantized8BitType
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:316
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu
armnn::RefLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2811
armnn::RefLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1039
armnn::RefLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1512
armnn::RefLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1304
armnn::ActivationFunction::SoftReLu
@ SoftReLu
armnn::RefLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2201
armnn::RefLayerSupport::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1339
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::LayerType::Floor
@ Floor
armnn::RefLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:628
armnn::RefLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1664
armnn::LayerType::Transpose
@ Transpose
armnn::LayerType::Comparison
@ Comparison
armnn::RefLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1937
armnn::RefLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2840
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::DataType::Float32
@ Float32
armnn::ActivationFunction::TanH
@ TanH
armnn::LayerType::Tile
@ Tile
armnn::RefLayerSupport::IsRankSupported
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2321
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::LayerType::Stack
@ Stack
armnn::DataType::QSymmS8
@ QSymmS8
armnn::LayerType::Normalization
@ Normalization
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::Reduce
@ Reduce
armnn::RefLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2650
armnn::RefLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1108
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::RefLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:697
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ActivationFunction::Gelu
@ Gelu
armnn::RefLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2740
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::RefLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2286
armnn::DataType::BFloat16
@ BFloat16
armnn::RefLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:665
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::RefLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2170
armnn::RefLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:915
armnn::RefLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:742
armnn::LayerType::Slice
@ Slice
armnn::RefLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2620
armnn::RefLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:560
armnn::DataType::Float16
@ Float16
armnn::RefLayerSupport::IsFakeQuantizationSupported
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1469
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::LayerSupportBase::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:120
armnn::RefLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1418
armnn::RefLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:949
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::RefLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2336
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::LayerType::ScatterNd
@ ScatterNd
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::Concat
@ Concat
armnn::RefLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2389
armnn::RefLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1206
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Debug
@ Debug
armnn::LayerType::Softmax
@ Softmax
armnn::RefLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:788
armnn::RefLayerSupport::IsBroadcastToSupported
bool IsBroadcastToSupported(const TensorInfo &input, const TensorInfo &output, const BroadcastToDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:823
armnn::ActivationFunction::Elu
@ Elu
armnn::RefLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2592
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::RefLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1701
armnn::LayerType::Quantize
@ Quantize
armnn::RefLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1900
armnn::ActivationFunction::Linear
@ Linear
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::Addition
@ Addition
armnn::RefLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2365
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::LayerType::BroadcastTo
@ BroadcastTo
armnn::RefLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:853
armnn::RefLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1534
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::LayerType::Division
@ Division
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Shape
@ Shape
armnn::RefLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2134
armnn::RefLayerSupport::IsDebugSupported
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1176
armnn::ActivationFunction::Abs
@ Abs
armnn::RefLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:999
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::RefLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: RefLayerSupport.cpp:1735
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::RefLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1381
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::RefLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2102
armnn::LayerType::Cast
@ Cast
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::RefLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2777
armnn::RefLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1487
armnn::LayerType::Reshape
@ Reshape
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Fill
@ Fill
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::RefLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2065
armnn::LayerType::Minimum
@ Minimum
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::RefLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2712
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::RefLayerSupport::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1999
armnn::RefLayerSupport::IsScatterNdSupported
bool IsScatterNdSupported(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &updates, const TensorInfo &output, const ScatterNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2458
armnn::ActivationFunction::ReLu
@ ReLu
armnn::LayerType::MemCopy
@ MemCopy
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
armnn::RefLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:981
armnn::LayerType::Rank
@ Rank
armnn::RefLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:883
armnn::LayerType::Mean
@ Mean
armnn::RefLayerSupport::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2516
armnn::ActivationFunction::Square
@ Square
armnn::RefLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2908
armnn::LayerType::Input
@ Input
armnn::LayerType::Resize
@ Resize
armnn::RefLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1019
armnn::RefLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1631
armnn::RefLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2680
armnn::CheckSupportRule
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
Definition: LayerSupportRules.hpp:37
armnn::RefLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: RefLayerSupport.cpp:1763
armnn::DataType::Signed64
@ Signed64
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::RefLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2563
armnn::RefLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2534
armnn::LayerType::Unmap
@ Unmap
armnn::RefLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2140
armnn::LayerType::QLstm
@ QLstm
armnn::RefLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1600
armnn::RefLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2939
armnn::RefLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1670
armnn::RefLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2028
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::ActivationFunction::Sigmoid
@ Sigmoid