ArmNN
 25.11
Loading...
Searching...
No Matches
NeonLayerSupport Class Reference

#include <NeonLayerSupport.hpp>

Inheritance diagram for NeonLayerSupport:
[legend]
Collaboration diagram for NeonLayerSupport:
[legend]

Public Member Functions

 NeonLayerSupport (const IBackendInternal::IBackendSpecificModelContextPtr &modelContextPtr)
 NeonLayerSupport ()
 ~NeonLayerSupport ()
bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
 Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported.
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFusedSupported (const std::vector< std::reference_wrapper< TensorInfo > > &inputs, const std::vector< std::reference_wrapper< TensorInfo > > &outputs, const FusedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReverseV2Supported (const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo > > &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTileSupported (const TensorInfo &input0, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Public Member Functions inherited from LayerSupportBase
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const

Additional Inherited Members

Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
virtual ~ILayerSupport ()

Detailed Description

Definition at line 14 of file NeonLayerSupport.hpp.

Constructor & Destructor Documentation

◆ NeonLayerSupport() [1/2]

Definition at line 175 of file NeonLayerSupport.cpp.

176 : m_ModelContextPtr(modelContextPtr)
177{
178}

◆ NeonLayerSupport() [2/2]

Definition at line 180 of file NeonLayerSupport.cpp.

181 : m_ModelContextPtr(nullptr)
182{
183}

◆ ~NeonLayerSupport()

~NeonLayerSupport ( )
inline

Definition at line 20 of file NeonLayerSupport.hpp.

20{}

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo & input,
const TensorInfo & output,
const ActivationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 779 of file NeonLayerSupport.cpp.

783{
784 IgnoreUnused(descriptor);
785 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
786 reasonIfUnsupported,
787 input,
788 output,
789 descriptor);
790}
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
void IgnoreUnused(Ts &&...)

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonActivationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 792 of file NeonLayerSupport.cpp.

796{
797 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
798 reasonIfUnsupported,
799 input0,
800 input1,
801 output,
802 nullptr);
803}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonAdditionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const ArgMinMaxDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 805 of file NeonLayerSupport.cpp.

809{
810 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonArgMinMaxWorkloadValidate,
811 reasonIfUnsupported,
812 input,
813 output,
814 descriptor);
815}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonArgMinMaxWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo & inputX,
const TensorInfo & inputY,
const TensorInfo & output,
const BatchMatMulDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 817 of file NeonLayerSupport.cpp.

822{
823 bool isFastMathEnabled = false;
824#if defined(ARMCOMPUTENEON_ENABLED)
825 if (m_ModelContextPtr)
826 {
827 if (m_ModelContextPtr.get() != nullptr)
828 {
829 auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
830 if (modelOptions)
831 {
832 isFastMathEnabled = modelOptions->IsFastMathEnabled();
833 }
834 }
835 }
836#endif
837 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchMatMulValidate,
838 reasonIfUnsupported,
839 inputX,
840 inputY,
841 output,
842 descriptor,
843 isFastMathEnabled,
844 nullptr);
845}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchMatMulValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & mean,
const TensorInfo & var,
const TensorInfo & beta,
const TensorInfo & gamma,
const BatchNormalizationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 847 of file NeonLayerSupport.cpp.

855{
856 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
857 reasonIfUnsupported,
858 input,
859 output,
860 mean,
861 var,
862 beta,
863 gamma,
864 descriptor,
865 nullptr);
866}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchNormalizationValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const BatchToSpaceNdDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 868 of file NeonLayerSupport.cpp.

872{
873 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchToSpaceNdWorkloadValidate,
874 reasonIfUnsupported,
875 input,
876 output,
877 descriptor);
878}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchToSpaceNdWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 880 of file NeonLayerSupport.cpp.

883{
884 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonCastValidate,
885 reasonIfUnsupported,
886 input,
887 output);
888}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonCastValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo & input,
const TensorInfo & output,
const ChannelShuffleDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 890 of file NeonLayerSupport.cpp.

894{
895 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonChannelShuffleValidate,
896 reasonIfUnsupported,
897 input,
898 output,
899 descriptor);
900}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonChannelShuffleValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const ComparisonDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 902 of file NeonLayerSupport.cpp.

907{
908
909 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonComparisonWorkloadValidate,
910 reasonIfUnsupported,
911 input0,
912 input1,
913 output,
914 descriptor);
915}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonComparisonWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo * > inputs,
const TensorInfo & output,
const OriginsDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 917 of file NeonLayerSupport.cpp.

921{
922 if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
923 {
924 SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
925 return false;
926 }
927
928 unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
929 if(concatInnerAxis < 3) // Width, height, or channels
930 {
931 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
932 reasonIfUnsupported,
933 inputs,
934 output,
935 descriptor);
936 }
937 else if (concatInnerAxis == 3)
938 {
939 for (auto& input : inputs)
940 {
941 if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
942 {
943 SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
944 return false;
945 }
946 }
947 return true; // Sub-tensors support concat along batch
948 }
949 else // > 4 dimensions not supported.
950 {
951 SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
952 return false;
953 }
954}
void SetValueChecked(Optional< T & > optionalRef, V &&val)

References FORWARD_WORKLOAD_VALIDATE_FUNC, OriginsDescriptor::GetConcatAxis(), OriginsDescriptor::GetNumDimensions(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonConcatWorkloadValidate(), and armnn::SetValueChecked().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 956 of file NeonLayerSupport.cpp.

958{
959 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
960 reasonIfUnsupported,
961 output);
962}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConstantWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 964 of file NeonLayerSupport.cpp.

967{
968 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvertFp16ToFp32WorkloadValidate,
969 reasonIfUnsupported,
970 input,
971 output);
972}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvertFp16ToFp32WorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 974 of file NeonLayerSupport.cpp.

977{
978 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvertFp32ToFp16WorkloadValidate,
979 reasonIfUnsupported,
980 input,
981 output);
982}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvertFp32ToFp16WorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 984 of file NeonLayerSupport.cpp.

990{
991 bool isFastMathEnabled = false;
992#if defined(ARMCOMPUTENEON_ENABLED)
993 if (m_ModelContextPtr)
994 {
995 if (m_ModelContextPtr.get() != nullptr)
996 {
997 auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
998 if (modelOptions)
999 {
1000 isFastMathEnabled = modelOptions->IsFastMathEnabled();
1001 }
1002 }
1003 }
1004#endif
1005
1006 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
1007 reasonIfUnsupported,
1008 input,
1009 output,
1010 descriptor,
1011 weights,
1012 biases,
1013 isFastMathEnabled,
1014 nullptr);
1015}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvolution2dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution3dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1017 of file NeonLayerSupport.cpp.

1023{
1024 bool isFastMathEnabled = false;
1025#if defined(ARMCOMPUTENEON_ENABLED)
1026 if (m_ModelContextPtr)
1027 {
1028 if (m_ModelContextPtr.get() != nullptr)
1029 {
1030 auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
1031 if (modelOptions)
1032 {
1033 isFastMathEnabled = modelOptions->IsFastMathEnabled();
1034 }
1035 }
1036 }
1037#endif
1038
1039 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution3dWorkloadValidate,
1040 reasonIfUnsupported,
1041 input,
1042 output,
1043 descriptor,
1044 weights,
1045 biases,
1046 isFastMathEnabled,
1047 nullptr);
1048}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvolution3dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthToSpaceDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1050 of file NeonLayerSupport.cpp.

1054{
1055 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthToSpaceWorkloadValidate,
1056 reasonIfUnsupported,
1057 input,
1058 output,
1059 descriptor);
1060}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthToSpaceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1062 of file NeonLayerSupport.cpp.

1068{
1069 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
1070 reasonIfUnsupported,
1071 input,
1072 output,
1073 descriptor,
1074 weights,
1075 biases,
1076 nullptr);
1077}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1079 of file NeonLayerSupport.cpp.

1082{
1083 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
1084 reasonIfUnsupported,
1085 input,
1086 output);
1087}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDequantizeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reason = EmptyOptional() ) const

Definition at line 1089 of file NeonLayerSupport.cpp.

1095{
1096 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
1097 reasonIfUnsupported,
1098 input,
1099 output,
1100 descriptor,
1101 weights,
1102 biases,
1103 nullptr);
1104}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1373 of file NeonLayerSupport.cpp.

1377{
1378 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
1379 reasonIfUnsupported,
1380 input0,
1381 input1,
1382 output,
1383 nullptr);
1384}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDivisionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo & input,
const TensorInfo & output,
const ElementwiseUnaryDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1106 of file NeonLayerSupport.cpp.

1110{
1111 switch(descriptor.m_Operation)
1112 {
1113 case UnaryOperation::Abs:
1114 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
1115 reasonIfUnsupported,
1116 input,
1117 output);
1118 case UnaryOperation::Exp:
1119 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonExpWorkloadValidate,
1120 reasonIfUnsupported,
1121 input,
1122 output);
1123 case UnaryOperation::LogicalNot:
1124 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalNotWorkloadValidate,
1125 reasonIfUnsupported,
1126 input,
1127 output);
1128 case UnaryOperation::Log:
1129 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogWorkloadValidate,
1130 reasonIfUnsupported,
1131 input,
1132 output);
1133 case UnaryOperation::Neg:
1134 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
1135 reasonIfUnsupported,
1136 input,
1137 output);
1138 case UnaryOperation::Rsqrt:
1139 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate,
1140 reasonIfUnsupported,
1141 input,
1142 output);
1143 case UnaryOperation::Sin:
1144 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSinWorkloadValidate,
1145 reasonIfUnsupported,
1146 input,
1147 output);
1148 case UnaryOperation::Sqrt:
1149 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSqrtWorkloadValidate,
1150 reasonIfUnsupported,
1151 input,
1152 output);
1153 default:
1154 return false;
1155 }
1156}

References armnn::Abs, armnn::Exp, FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::Log, armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::Neg, armnn::NeonAbsWorkloadValidate(), armnn::NeonExpWorkloadValidate(), armnn::NeonLogicalNotWorkloadValidate(), armnn::NeonLogWorkloadValidate(), armnn::NeonNegWorkloadValidate(), armnn::NeonRsqrtWorkloadValidate(), armnn::NeonSinWorkloadValidate(), armnn::NeonSqrtWorkloadValidate(), armnn::Rsqrt, armnn::Sin, and armnn::Sqrt.

Referenced by armnn::IsLayerTypeSupported().

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo & input,
const TensorInfo & output,
const FillDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1158 of file NeonLayerSupport.cpp.

1162{
1163 armnn::IgnoreUnused(input);
1164 armnn::IgnoreUnused(output);
1165 armnn::IgnoreUnused(descriptor);
1166
1167 return IsNeonBackendSupported(reasonIfUnsupported);
1168}

References armnn::IgnoreUnused().

Referenced by armnn::IsLayerTypeSupported().

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1170 of file NeonLayerSupport.cpp.

1173{
1174 armnn::IgnoreUnused(output);
1175 return IsNeonBackendSupported(reasonIfUnsupported) &&
1176 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1177 input.GetDataType(),
1178 &FalseFuncF16<>,
1179 &TrueFunc<>,
1180 &FalseFuncU8<>,
1181 &FalseFuncI32<>,
1182 &FalseFuncU8<>);
1183}
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

References armnn::FalseFuncF16(), armnn::FalseFuncI32(), armnn::FalseFuncU8(), TensorInfo::GetDataType(), armnn::IgnoreUnused(), armnn::IsSupportedForDataTypeGeneric(), and armnn::TrueFunc().

Referenced by armnn::IsLayerTypeSupported().

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & weights,
const TensorInfo & biases,
const FullyConnectedDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1185 of file NeonLayerSupport.cpp.

1191{
1192 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
1193 reasonIfUnsupported,
1194 input,
1195 output,
1196 weights,
1197 biases,
1198 descriptor,
1199 nullptr);
1200}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFullyConnectedWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsFusedSupported()

bool IsFusedSupported ( const std::vector< std::reference_wrapper< TensorInfo > > & inputs,
const std::vector< std::reference_wrapper< TensorInfo > > & outputs,
const FusedDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1202 of file NeonLayerSupport.cpp.

1206{
1207 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFusedWorkloadValidate,
1208 reasonIfUnsupported,
1209 inputs,
1210 outputs,
1211 descriptor,
1212 nullptr);
1213}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFusedWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported ) const

Definition at line 1229 of file NeonLayerSupport.cpp.

1233{
1234 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherNdWorkloadValidate,
1235 reasonIfUnsupported,
1236 input0,
1237 input1,
1238 output);
1239}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherNdWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const GatherDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported ) const

Definition at line 1215 of file NeonLayerSupport.cpp.

1220{
1221 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherWorkloadValidate,
1222 reasonIfUnsupported,
1223 input0,
1224 input1,
1225 output,
1226 descriptor);
1227}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo & input,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1241 of file NeonLayerSupport.cpp.

1243{
1244 return IsNeonBackendSupported(reasonIfUnsupported, input);
1245}

Referenced by armnn::IsLayerTypeSupported().

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const InstanceNormalizationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1247 of file NeonLayerSupport.cpp.

1251{
1252 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonInstanceNormalizationWorkloadValidate,
1253 reasonIfUnsupported,
1254 input,
1255 output,
1256 descriptor);
1257}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonInstanceNormalizationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const L2NormalizationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1259 of file NeonLayerSupport.cpp.

1263{
1264 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1265}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonL2NormalizationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType & type,
const std::vector< TensorInfo > & infos,
const BaseDescriptor & descriptor,
const Optional< LstmInputParamsInfo > & lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > & quantizedLstmParamsInfo,
Optional< std::string & > reasonIfUnsupported ) const
overridevirtual

Default implementation of the ILayerSupport interface, Backends should implement this as a switch statement for each of their LayerTypes calling their specific backend implementation of IsXXXLayerSupported.

Reimplemented from LayerSupportBase.

Definition at line 731 of file NeonLayerSupport.cpp.

737{
738 bool isSupported = IsLayerTypeSupported(type,
739 infos,
740 descriptor,
741 lstmParamsInfo,
742 quantizedLstmParamsInfo,
743 reasonIfUnsupported,
744 *this);
745
746 // For android-nn-driver to run FP16 operations on CpuAcc we need at least v8.2
747 // architecture. If the available architecture is older than v8.2, we can check if the operator is
748 // supported by changing operator inputs & outputs to be FP32.
749 // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
750 // to the parsers if the operator can be supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
751 // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
752 // and, insert convert layers around the FP32 operator.
753 if (reasonIfUnsupported.has_value())
754 {
755 std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
756 if (!isSupported
757 && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
758 {
759 std::vector<TensorInfo> newInfos;
760 for (auto info: infos)
761 {
762 newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
763 }
764
765 std::string tmpString;
766 return IsLayerTypeSupported(type,
767 newInfos,
768 descriptor,
769 lstmParamsInfo,
770 quantizedLstmParamsInfo,
771 tmpString,
772 *this);
773 }
774 }
775
776 return isSupported;
777}
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)

References armnn::Float32, OptionalBase::has_value(), armnn::info, armnn::IsLayerTypeSupported(), and OptionalReferenceSwitch< IsReference, T >::value().

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const LogicalBinaryDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported ) const

Definition at line 1267 of file NeonLayerSupport.cpp.

1272{
1273 switch(descriptor.m_Operation)
1274 {
1275 case LogicalBinaryOperation::LogicalAnd:
1276 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalAndWorkloadValidate,
1277 reasonIfUnsupported,
1278 input0,
1279 input1,
1280 output);
1281 case LogicalBinaryOperation::LogicalOr:
1282 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalOrWorkloadValidate,
1283 reasonIfUnsupported,
1284 input0,
1285 input1,
1286 output);
1287 default:
1288 return false;
1289 }
1290}

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::LogicalAnd, armnn::LogicalOr, LogicalBinaryDescriptor::m_Operation, armnn::NeonLogicalAndWorkloadValidate(), and armnn::NeonLogicalOrWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const LogSoftmaxDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1292 of file NeonLayerSupport.cpp.

1296{
1297 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1298}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLogSoftmaxWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo & input,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateIn,
const TensorInfo & scratchBuffer,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const LstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1300 of file NeonLayerSupport.cpp.

1310{
1311 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLstmFloatWorkloadValidate,
1312 reasonIfUnsupported,
1313 input,
1314 outputStateIn,
1315 cellStateIn,
1316 scratchBuffer,
1317 outputStateOut,
1318 cellStateOut,
1319 output,
1320 descriptor,
1321 paramsInfo);
1322}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLstmFloatWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1324 of file NeonLayerSupport.cpp.

1328{
1329 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
1330 reasonIfUnsupported,
1331 input0,
1332 input1,
1333 output);
1334}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMaximumWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo & input,
const TensorInfo & output,
const MeanDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1336 of file NeonLayerSupport.cpp.

1340{
1341 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
1342 reasonIfUnsupported,
1343 input,
1344 output,
1345 descriptor);
1346}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMeanWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1348 of file NeonLayerSupport.cpp.

1352{
1353 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
1354 reasonIfUnsupported,
1355 input0,
1356 input1,
1357 output);
1358}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMinimumWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1360 of file NeonLayerSupport.cpp.

1364{
1365 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
1366 reasonIfUnsupported,
1367 input0,
1368 input1,
1369 output,
1370 nullptr);
1371}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMultiplicationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const NormalizationDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1386 of file NeonLayerSupport.cpp.

1390{
1391 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
1392 reasonIfUnsupported,
1393 input,
1394 output,
1395 descriptor);
1396}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonNormalizationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1398 of file NeonLayerSupport.cpp.

1400{
1401 return IsNeonBackendSupported(reasonIfUnsupported, output);
1402}

Referenced by armnn::IsLayerTypeSupported().

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo & input,
const TensorInfo & output,
const PadDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1404 of file NeonLayerSupport.cpp.

1408{
1409 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
1410 reasonIfUnsupported,
1411 input,
1412 output,
1413 descriptor);
1414}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPadWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo & input,
const TensorInfo & output,
const PermuteDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1416 of file NeonLayerSupport.cpp.

1420{
1421 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1422}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPermuteWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Pooling2dDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1424 of file NeonLayerSupport.cpp.

1428{
1429 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1430}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling2dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Pooling3dDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1432 of file NeonLayerSupport.cpp.

1436{
1437 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1438}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling3dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo & input,
const TensorInfo & alpha,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1440 of file NeonLayerSupport.cpp.

1444{
1445 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1446}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPreluWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo & input,
const TensorInfo & previousOutputIn,
const TensorInfo & previousCellStateIn,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const QLstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1448 of file NeonLayerSupport.cpp.

1457{
1458 // This check is required here in order to pass the IsLayerSupported data type tests
1459 if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1460 previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1461 previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1462 outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1463 cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1464 output.GetDataType() == armnn::DataType::QAsymmS8)
1465 {
1466 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQLstmWorkloadValidate,
1467 reasonIfUnsupported,
1468 input,
1469 previousCellStateIn,
1470 previousOutputIn,
1471 cellStateOut,
1472 outputStateOut,
1473 output,
1474 descriptor,
1475 paramsInfo);
1476 }
1477 else
1478 {
1479 return false;
1480 }
1481}

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonQLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by armnn::IsLayerTypeSupported().

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported ( const TensorInfo & input,
const TensorInfo & cellStateIn,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateOut,
const TensorInfo & outputStateOut,
const QuantizedLstmInputParamsInfo & paramsInfo,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1493 of file NeonLayerSupport.cpp.

1500{
1501 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizedLstmWorkloadValidate,
1502 reasonIfUnsupported,
1503 input,
1504 cellStateIn,
1505 outputStateIn,
1506 cellStateOut,
1507 outputStateOut,
1508 paramsInfo);
1509}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizedLstmWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1483 of file NeonLayerSupport.cpp.

1486{
1487 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate,
1488 reasonIfUnsupported,
1489 input,
1490 output);
1491}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo & input,
const TensorInfo & output,
const ReduceDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1511 of file NeonLayerSupport.cpp.

1515{
1516 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReduceWorkloadValidate,
1517 reasonIfUnsupported,
1518 input,
1519 output,
1520 descriptor);
1521}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonReduceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo & input,
const TensorInfo & output,
const ReshapeDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1523 of file NeonLayerSupport.cpp.

1527{
1528 armnn::IgnoreUnused(descriptor);
1529 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
1530 reasonIfUnsupported,
1531 input,
1532 output);
1533}

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonReshapeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo & input,
const TensorInfo & output,
const ResizeDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1535 of file NeonLayerSupport.cpp.

1539{
1540 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
1541 reasonIfUnsupported,
1542 input,
1543 output,
1544 descriptor);
1545}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonResizeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsReverseV2Supported()

bool IsReverseV2Supported ( const TensorInfo & input,
const TensorInfo & axis,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported ) const

Definition at line 1547 of file NeonLayerSupport.cpp.

1551{
1552 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReverseV2WorkloadValidate,
1553 reasonIfUnsupported,
1554 input,
1555 axis,
1556 output);
1557}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonReverseV2WorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo & input,
const TensorInfo & output,
const SliceDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1559 of file NeonLayerSupport.cpp.

1563{
1564 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSliceWorkloadValidate,
1565 reasonIfUnsupported,
1566 input,
1567 output,
1568 descriptor);
1569}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSliceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const SoftmaxDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1571 of file NeonLayerSupport.cpp.

1575{
1576 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1577}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSoftmaxWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const SpaceToBatchNdDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1579 of file NeonLayerSupport.cpp.

1583{
1584 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToBatchNdWorkloadValidate,
1585 reasonIfUnsupported,
1586 input,
1587 output,
1588 descriptor);
1589}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToBatchNdWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo & input,
const TensorInfo & output,
const SpaceToDepthDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1591 of file NeonLayerSupport.cpp.

1595{
1596 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToDepthWorkloadValidate,
1597 reasonIfUnsupported,
1598 input,
1599 output,
1600 descriptor);
1601}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToDepthWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo & input,
const std::vector< std::reference_wrapper< TensorInfo > > & outputs,
const ViewsDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1603 of file NeonLayerSupport.cpp.

1607{
1608#if defined(ARMCOMPUTENEON_ENABLED)
1609 // When splitting an input with more than two dimensions along the
1610 // last dimension, sub-tensors cannot be used because the
1611 // width and height of the sub-tensors do not match
1612 // the width and height of the parent tensor.
1613 std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1614 if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1615 *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1616 {
1617 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
1618 reasonIfUnsupported,
1619 input,
1620 outputs,
1621 *splitAxis.begin());
1622 }
1623#endif
1624 IgnoreUnused(descriptor);
1625 for (auto output : outputs)
1626 {
1627 if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1628 {
1629 SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1630 return false;
1631 }
1632 }
1633 return true;
1634}
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.

References armnn::ComputeSplitAxis(), FORWARD_WORKLOAD_VALIDATE_FUNC, ViewsDescriptor::GetNumDimensions(), TensorInfo::GetShape(), armnn::IgnoreUnused(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonSplitterWorkloadValidate(), and armnn::SetValueChecked().

Referenced by armnn::IsLayerTypeSupported().

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo * > & inputs,
const TensorInfo & output,
const StackDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1636 of file NeonLayerSupport.cpp.

1640{
1641 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStackWorkloadValidate,
1642 reasonIfUnsupported,
1643 inputs,
1644 output,
1645 descriptor);
1646}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStackWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo & input,
const TensorInfo & output,
const StridedSliceDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1648 of file NeonLayerSupport.cpp.

1652{
1653 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStridedSliceWorkloadValidate,
1654 reasonIfUnsupported,
1655 input,
1656 output,
1657 descriptor);
1658}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStridedSliceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1660 of file NeonLayerSupport.cpp.

1664{
1665 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
1666 reasonIfUnsupported,
1667 input0,
1668 input1,
1669 output,
1670 nullptr);
1671}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSubtractionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsTileSupported()

bool IsTileSupported ( const TensorInfo & input,
const TensorInfo & output,
const TileDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1673 of file NeonLayerSupport.cpp.

1677{
1678 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTileWorkloadValidate,
1679 reasonIfUnsupported,
1680 input,
1681 output,
1682 descriptor);
1683}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTileWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > & biases,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1685 of file NeonLayerSupport.cpp.

1691{
1692 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeConvolution2dWorkloadValidate,
1693 reasonIfUnsupported,
1694 input,
1695 output,
1696 descriptor,
1697 weights,
1698 biases);
1699}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeConvolution2dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeDescriptor & descriptor,
Optional< std::string & > reasonIfUnsupported = EmptyOptional() ) const

Definition at line 1701 of file NeonLayerSupport.cpp.

1705{
1706 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1707}

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo & input,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateIn,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const UnidirectionalSequenceLstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string & > reasonIfUnsupported ) const

Definition at line 1709 of file NeonLayerSupport.cpp.

1718{
1719 if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1720 outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1721 cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1722 outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1723 cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1724 output.GetDataType() == armnn::DataType::QAsymmS8)
1725 {
1726 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonUnidirectionalSequenceLstmWorkloadValidate,
1727 reasonIfUnsupported,
1728 input,
1729 outputStateIn,
1730 cellStateIn,
1731 outputStateOut,
1732 cellStateOut,
1733 output,
1734 descriptor,
1735 paramsInfo);
1736 }
1737 else
1738 {
1739 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonUnidirectionalSequenceLstmFloatWorkloadValidate,
1740 reasonIfUnsupported,
1741 input,
1742 outputStateIn,
1743 cellStateIn,
1744 outputStateOut,
1745 cellStateOut,
1746 output,
1747 descriptor,
1748 paramsInfo);
1749 }
1750}

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by armnn::IsLayerTypeSupported().


The documentation for this class was generated from the following files: