ArmNN
 24.08
NeonLayerSupport Class Reference

#include <NeonLayerSupport.hpp>

Inheritance diagram for NeonLayerSupport:
[legend]
Collaboration diagram for NeonLayerSupport:
[legend]

Public Member Functions

 NeonLayerSupport (const IBackendInternal::IBackendSpecificModelContextPtr &modelContextPtr)
 
 NeonLayerSupport ()
 
 ~NeonLayerSupport ()
 
bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
 Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported. More...
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFusedSupported (const std::vector< std::reference_wrapper< TensorInfo >> &inputs, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const FusedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsReverseV2Supported (const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsTileSupported (const TensorInfo &input0, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
 
- Public Member Functions inherited from LayerSupportBase
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 14 of file NeonLayerSupport.hpp.

Constructor & Destructor Documentation

◆ NeonLayerSupport() [1/2]

Definition at line 174 of file NeonLayerSupport.cpp.

175  : m_ModelContextPtr(modelContextPtr)
176 {
177 }

◆ NeonLayerSupport() [2/2]

Definition at line 179 of file NeonLayerSupport.cpp.

180  : m_ModelContextPtr(nullptr)
181 {
182 }

◆ ~NeonLayerSupport()

~NeonLayerSupport ( )
inline

Definition at line 20 of file NeonLayerSupport.hpp.

20 {}

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo input,
const TensorInfo output,
const ActivationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 770 of file NeonLayerSupport.cpp.

774 {
775  IgnoreUnused(descriptor);
776  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
777  reasonIfUnsupported,
778  input,
779  output,
780  descriptor);
781 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonActivationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 783 of file NeonLayerSupport.cpp.

787 {
788  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
789  reasonIfUnsupported,
790  input0,
791  input1,
792  output,
793  nullptr);
794 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonAdditionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo input,
const TensorInfo output,
const ArgMinMaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 796 of file NeonLayerSupport.cpp.

800 {
801  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonArgMinMaxWorkloadValidate,
802  reasonIfUnsupported,
803  input,
804  output,
805  descriptor);
806 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonArgMinMaxWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo inputX,
const TensorInfo inputY,
const TensorInfo output,
const BatchMatMulDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 808 of file NeonLayerSupport.cpp.

813 {
814  bool isFastMathEnabled = false;
815 #if defined(ARMCOMPUTENEON_ENABLED)
816  if (m_ModelContextPtr)
817  {
818  if (m_ModelContextPtr.get() != nullptr)
819  {
820  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
821  if (modelOptions)
822  {
823  isFastMathEnabled = modelOptions->IsFastMathEnabled();
824  }
825  }
826  }
827 #endif
828  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchMatMulValidate,
829  reasonIfUnsupported,
830  inputX,
831  inputY,
832  output,
833  descriptor,
834  isFastMathEnabled,
835  nullptr);
836 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonBatchMatMulValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo mean,
const TensorInfo var,
const TensorInfo beta,
const TensorInfo gamma,
const BatchNormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 838 of file NeonLayerSupport.cpp.

846 {
847  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
848  reasonIfUnsupported,
849  input,
850  output,
851  mean,
852  var,
853  beta,
854  gamma,
855  descriptor,
856  nullptr);
857 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchNormalizationValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo input,
const TensorInfo output,
const BatchToSpaceNdDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 859 of file NeonLayerSupport.cpp.

863 {
864  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchToSpaceNdWorkloadValidate,
865  reasonIfUnsupported,
866  input,
867  output,
868  descriptor);
869 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchToSpaceNdWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 871 of file NeonLayerSupport.cpp.

874 {
875  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonCastValidate,
876  reasonIfUnsupported,
877  input,
878  output);
879 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonCastValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo input,
const TensorInfo output,
const ChannelShuffleDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 881 of file NeonLayerSupport.cpp.

885 {
886  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonChannelShuffleValidate,
887  reasonIfUnsupported,
888  input,
889  output,
890  descriptor);
891 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonChannelShuffleValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const ComparisonDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 893 of file NeonLayerSupport.cpp.

898 {
899 
900  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonComparisonWorkloadValidate,
901  reasonIfUnsupported,
902  input0,
903  input1,
904  output,
905  descriptor);
906 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonComparisonWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo * >  inputs,
const TensorInfo output,
const OriginsDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 908 of file NeonLayerSupport.cpp.

912 {
913  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
914  {
915  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
916  return false;
917  }
918 
919  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
920  if(concatInnerAxis < 3) // Width, height, or channels
921  {
922  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
923  reasonIfUnsupported,
924  inputs,
925  output,
926  descriptor);
927  }
928  else if (concatInnerAxis == 3)
929  {
930  for (auto& input : inputs)
931  {
932  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
933  {
934  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
935  return false;
936  }
937  }
938  return true; // Sub-tensors support concat along batch
939  }
940  else // > 4 dimensions not supported.
941  {
942  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
943  return false;
944  }
945 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, OriginsDescriptor::GetConcatAxis(), OriginsDescriptor::GetNumDimensions(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonConcatWorkloadValidate(), and armnn::SetValueChecked().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 947 of file NeonLayerSupport.cpp.

949 {
950  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
951  reasonIfUnsupported,
952  output);
953 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConstantWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 955 of file NeonLayerSupport.cpp.

958 {
959  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvertFp16ToFp32WorkloadValidate,
960  reasonIfUnsupported,
961  input,
962  output);
963 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvertFp16ToFp32WorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 965 of file NeonLayerSupport.cpp.

968 {
969  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvertFp32ToFp16WorkloadValidate,
970  reasonIfUnsupported,
971  input,
972  output);
973 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvertFp32ToFp16WorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 975 of file NeonLayerSupport.cpp.

981 {
982  bool isFastMathEnabled = false;
983 #if defined(ARMCOMPUTENEON_ENABLED)
984  if (m_ModelContextPtr)
985  {
986  if (m_ModelContextPtr.get() != nullptr)
987  {
988  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
989  if (modelOptions)
990  {
991  isFastMathEnabled = modelOptions->IsFastMathEnabled();
992  }
993  }
994  }
995 #endif
996 
997  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
998  reasonIfUnsupported,
999  input,
1000  output,
1001  descriptor,
1002  weights,
1003  biases,
1004  isFastMathEnabled,
1005  nullptr);
1006 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonConvolution2dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution3dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1008 of file NeonLayerSupport.cpp.

1014 {
1015  bool isFastMathEnabled = false;
1016 #if defined(ARMCOMPUTENEON_ENABLED)
1017  if (m_ModelContextPtr)
1018  {
1019  if (m_ModelContextPtr.get() != nullptr)
1020  {
1021  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
1022  if (modelOptions)
1023  {
1024  isFastMathEnabled = modelOptions->IsFastMathEnabled();
1025  }
1026  }
1027  }
1028 #endif
1029 
1030  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution3dWorkloadValidate,
1031  reasonIfUnsupported,
1032  input,
1033  output,
1034  descriptor,
1035  weights,
1036  biases,
1037  isFastMathEnabled,
1038  nullptr);
1039 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonConvolution3dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo input,
const TensorInfo output,
const DepthToSpaceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1041 of file NeonLayerSupport.cpp.

1045 {
1046  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthToSpaceWorkloadValidate,
1047  reasonIfUnsupported,
1048  input,
1049  output,
1050  descriptor);
1051 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthToSpaceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1053 of file NeonLayerSupport.cpp.

1059 {
1060  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
1061  reasonIfUnsupported,
1062  input,
1063  output,
1064  descriptor,
1065  weights,
1066  biases,
1067  nullptr);
1068 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1070 of file NeonLayerSupport.cpp.

1073 {
1074  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
1075  reasonIfUnsupported,
1076  input,
1077  output);
1078 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDequantizeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reason = EmptyOptional() 
) const

Definition at line 1080 of file NeonLayerSupport.cpp.

1086 {
1087  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
1088  reasonIfUnsupported,
1089  input,
1090  output,
1091  descriptor,
1092  weights,
1093  biases,
1094  nullptr);
1095 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1364 of file NeonLayerSupport.cpp.

1368 {
1369  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
1370  reasonIfUnsupported,
1371  input0,
1372  input1,
1373  output,
1374  nullptr);
1375 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDivisionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo input,
const TensorInfo output,
const ElementwiseUnaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1097 of file NeonLayerSupport.cpp.

1101 {
1102  switch(descriptor.m_Operation)
1103  {
1104  case UnaryOperation::Abs:
1105  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
1106  reasonIfUnsupported,
1107  input,
1108  output);
1109  case UnaryOperation::Exp:
1110  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonExpWorkloadValidate,
1111  reasonIfUnsupported,
1112  input,
1113  output);
1114  case UnaryOperation::LogicalNot:
1115  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalNotWorkloadValidate,
1116  reasonIfUnsupported,
1117  input,
1118  output);
1119  case UnaryOperation::Log:
1120  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogWorkloadValidate,
1121  reasonIfUnsupported,
1122  input,
1123  output);
1124  case UnaryOperation::Neg:
1125  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
1126  reasonIfUnsupported,
1127  input,
1128  output);
1129  case UnaryOperation::Rsqrt:
1130  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate,
1131  reasonIfUnsupported,
1132  input,
1133  output);
1134  case UnaryOperation::Sin:
1135  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSinWorkloadValidate,
1136  reasonIfUnsupported,
1137  input,
1138  output);
1139  case UnaryOperation::Sqrt:
1140  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSqrtWorkloadValidate,
1141  reasonIfUnsupported,
1142  input,
1143  output);
1144  default:
1145  return false;
1146  }
1147 }

References armnn::Abs, armnn::Exp, FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::Log, armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::Neg, armnn::NeonAbsWorkloadValidate(), armnn::NeonExpWorkloadValidate(), armnn::NeonLogicalNotWorkloadValidate(), armnn::NeonLogWorkloadValidate(), armnn::NeonNegWorkloadValidate(), armnn::NeonRsqrtWorkloadValidate(), armnn::NeonSinWorkloadValidate(), armnn::NeonSqrtWorkloadValidate(), armnn::Rsqrt, armnn::Sin, and armnn::Sqrt.

Referenced by armnn::IsLayerTypeSupported().

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo input,
const TensorInfo output,
const FillDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1149 of file NeonLayerSupport.cpp.

1153 {
1154  armnn::IgnoreUnused(input);
1155  armnn::IgnoreUnused(output);
1156  armnn::IgnoreUnused(descriptor);
1157 
1158  return IsNeonBackendSupported(reasonIfUnsupported);
1159 }

References armnn::IgnoreUnused().

Referenced by armnn::IsLayerTypeSupported().

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1161 of file NeonLayerSupport.cpp.

1164 {
1165  armnn::IgnoreUnused(output);
1166  return IsNeonBackendSupported(reasonIfUnsupported) &&
1167  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1168  input.GetDataType(),
1169  &FalseFuncF16<>,
1170  &TrueFunc<>,
1171  &FalseFuncU8<>,
1172  &FalseFuncI32<>,
1173  &FalseFuncU8<>);
1174 }

References TensorInfo::GetDataType(), armnn::IgnoreUnused(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by armnn::IsLayerTypeSupported().

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo weights,
const TensorInfo biases,
const FullyConnectedDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1176 of file NeonLayerSupport.cpp.

1182 {
1183  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
1184  reasonIfUnsupported,
1185  input,
1186  output,
1187  weights,
1188  biases,
1189  descriptor,
1190  nullptr);
1191 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFullyConnectedWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsFusedSupported()

bool IsFusedSupported ( const std::vector< std::reference_wrapper< TensorInfo >> &  inputs,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const FusedDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1193 of file NeonLayerSupport.cpp.

1197 {
1199  reasonIfUnsupported,
1200  inputs,
1201  outputs,
1202  descriptor,
1203  nullptr);
1204 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFusedWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1220 of file NeonLayerSupport.cpp.

1224 {
1226  reasonIfUnsupported,
1227  input0,
1228  input1,
1229  output);
1230 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherNdWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1206 of file NeonLayerSupport.cpp.

1211 {
1213  reasonIfUnsupported,
1214  input0,
1215  input1,
1216  output,
1217  descriptor);
1218 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1232 of file NeonLayerSupport.cpp.

1234 {
1235  return IsNeonBackendSupported(reasonIfUnsupported, input);
1236 }

Referenced by armnn::IsLayerTypeSupported().

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1238 of file NeonLayerSupport.cpp.

1242 {
1244  reasonIfUnsupported,
1245  input,
1246  output,
1247  descriptor);
1248 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonInstanceNormalizationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1250 of file NeonLayerSupport.cpp.

1254 {
1255  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1256 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonL2NormalizationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmParamsInfo,
Optional< std::string & >  reasonIfUnsupported 
) const
overridevirtual

Default implementation of the ILayerSupport interface, Backends should implement this as a switch statement for each of their LayerTypes calling their specific backend implementation of IsXXXLayerSupported.

Reimplemented from LayerSupportBase.

Definition at line 722 of file NeonLayerSupport.cpp.

728 {
729  bool isSupported = IsLayerTypeSupported(type,
730  infos,
731  descriptor,
732  lstmParamsInfo,
733  quantizedLstmParamsInfo,
734  reasonIfUnsupported,
735  *this);
736 
737  // For android-nn-driver and support library, to run FP16 operations on CpuAcc we need at least v8.2
738  // architecture. If the available architecture is older than v8.2, we can check if the operator is
739  // supported by changing operator inputs & outputs to be FP32.
740  // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
741  // to the parsers if the operator can be supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
742  // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
743  // and insert convert layers around the FP32 operator.
744  if (reasonIfUnsupported.has_value())
745  {
746  std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
747  if (!isSupported
748  && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
749  {
750  std::vector<TensorInfo> newInfos;
751  for (auto info: infos)
752  {
753  newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
754  }
755 
756  std::string tmpString;
757  return IsLayerTypeSupported(type,
758  newInfos,
759  descriptor,
760  lstmParamsInfo,
761  quantizedLstmParamsInfo,
762  tmpString,
763  *this);
764  }
765  }
766 
767  return isSupported;
768 }

References armnn::Float32, OptionalBase::has_value(), armnn::info, armnn::IsLayerTypeSupported(), and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1258 of file NeonLayerSupport.cpp.

1263 {
1264  switch(descriptor.m_Operation)
1265  {
1268  reasonIfUnsupported,
1269  input0,
1270  input1,
1271  output);
1274  reasonIfUnsupported,
1275  input0,
1276  input1,
1277  output);
1278  default:
1279  return false;
1280  }
1281 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::LogicalAnd, armnn::LogicalOr, LogicalBinaryDescriptor::m_Operation, armnn::NeonLogicalAndWorkloadValidate(), and armnn::NeonLogicalOrWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1283 of file NeonLayerSupport.cpp.

1287 {
1288  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1289 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLogSoftmaxWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1291 of file NeonLayerSupport.cpp.

1301 {
1303  reasonIfUnsupported,
1304  input,
1305  outputStateIn,
1306  cellStateIn,
1307  scratchBuffer,
1308  outputStateOut,
1309  cellStateOut,
1310  output,
1311  descriptor,
1312  paramsInfo);
1313 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLstmFloatWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1315 of file NeonLayerSupport.cpp.

1319 {
1321  reasonIfUnsupported,
1322  input0,
1323  input1,
1324  output);
1325 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMaximumWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1327 of file NeonLayerSupport.cpp.

1331 {
1333  reasonIfUnsupported,
1334  input,
1335  output,
1336  descriptor);
1337 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMeanWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1339 of file NeonLayerSupport.cpp.

1343 {
1345  reasonIfUnsupported,
1346  input0,
1347  input1,
1348  output);
1349 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMinimumWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1351 of file NeonLayerSupport.cpp.

1355 {
1357  reasonIfUnsupported,
1358  input0,
1359  input1,
1360  output,
1361  nullptr);
1362 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMultiplicationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const NormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1377 of file NeonLayerSupport.cpp.

1381 {
1383  reasonIfUnsupported,
1384  input,
1385  output,
1386  descriptor);
1387 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonNormalizationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1389 of file NeonLayerSupport.cpp.

1391 {
1392  return IsNeonBackendSupported(reasonIfUnsupported, output);
1393 }

Referenced by armnn::IsLayerTypeSupported().

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo input,
const TensorInfo output,
const PadDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1395 of file NeonLayerSupport.cpp.

1399 {
1401  reasonIfUnsupported,
1402  input,
1403  output,
1404  descriptor);
1405 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPadWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo input,
const TensorInfo output,
const PermuteDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1407 of file NeonLayerSupport.cpp.

1411 {
1412  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1413 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPermuteWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling2dDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1415 of file NeonLayerSupport.cpp.

1419 {
1420  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1421 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling2dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling3dDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1423 of file NeonLayerSupport.cpp.

1427 {
1428  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1429 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling3dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo input,
const TensorInfo alpha,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1431 of file NeonLayerSupport.cpp.

1435 {
1436  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1437 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPreluWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo input,
const TensorInfo previousOutputIn,
const TensorInfo previousCellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const QLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1439 of file NeonLayerSupport.cpp.

1448 {
1449  // Check required here in order to pass IsLayerSupported for datatype tests
1450  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1451  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1452  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1453  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1454  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1455  output.GetDataType() == armnn::DataType::QAsymmS8)
1456  {
1458  reasonIfUnsupported,
1459  input,
1460  previousCellStateIn,
1461  previousOutputIn,
1462  cellStateOut,
1463  outputStateOut,
1464  output,
1465  descriptor,
1466  paramsInfo);
1467  }
1468  else
1469  {
1470  return false;
1471  }
1472 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonQLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by armnn::IsLayerTypeSupported().

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported ( const TensorInfo input,
const TensorInfo cellStateIn,
const TensorInfo outputStateIn,
const TensorInfo cellStateOut,
const TensorInfo outputStateOut,
const QuantizedLstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1484 of file NeonLayerSupport.cpp.

1491 {
1493  reasonIfUnsupported,
1494  input,
1495  cellStateIn,
1496  outputStateIn,
1497  cellStateOut,
1498  outputStateOut,
1499  paramsInfo);
1500 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizedLstmWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1474 of file NeonLayerSupport.cpp.

1477 {
1479  reasonIfUnsupported,
1480  input,
1481  output);
1482 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo input,
const TensorInfo output,
const ReduceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1502 of file NeonLayerSupport.cpp.

1506 {
1508  reasonIfUnsupported,
1509  input,
1510  output,
1511  descriptor);
1512 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonReduceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo input,
const TensorInfo output,
const ReshapeDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1514 of file NeonLayerSupport.cpp.

1518 {
1519  armnn::IgnoreUnused(descriptor);
1521  reasonIfUnsupported,
1522  input,
1523  output);
1524 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonReshapeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo input,
const TensorInfo output,
const ResizeDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1526 of file NeonLayerSupport.cpp.

1530 {
1532  reasonIfUnsupported,
1533  input,
1534  output,
1535  descriptor);
1536 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonResizeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsReverseV2Supported()

bool IsReverseV2Supported ( const TensorInfo input,
const TensorInfo axis,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1538 of file NeonLayerSupport.cpp.

1542 {
1544  reasonIfUnsupported,
1545  input,
1546  axis,
1547  output);
1548 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonReverseV2WorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo input,
const TensorInfo output,
const SliceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1550 of file NeonLayerSupport.cpp.

1554 {
1556  reasonIfUnsupported,
1557  input,
1558  output,
1559  descriptor);
1560 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSliceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const SoftmaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1562 of file NeonLayerSupport.cpp.

1566 {
1567  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1568 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSoftmaxWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToBatchNdDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1570 of file NeonLayerSupport.cpp.

1574 {
1576  reasonIfUnsupported,
1577  input,
1578  output,
1579  descriptor);
1580 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToBatchNdWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToDepthDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1582 of file NeonLayerSupport.cpp.

1586 {
1588  reasonIfUnsupported,
1589  input,
1590  output,
1591  descriptor);
1592 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToDepthWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1594 of file NeonLayerSupport.cpp.

1598 {
1599 #if defined(ARMCOMPUTENEON_ENABLED)
1600  // Split along the last dimension, cannot use sub-tensors
1601  // as width and height of the sub-tensors do not match
1602  // the width and height of the parent tensor
1603  // in case of input with more than 2D.
1604  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1605  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1606  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1607  {
1609  reasonIfUnsupported,
1610  input,
1611  outputs,
1612  *splitAxis.begin());
1613  }
1614 #endif
1615  IgnoreUnused(descriptor);
1616  for (auto output : outputs)
1617  {
1618  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1619  {
1620  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1621  return false;
1622  }
1623  }
1624  return true;
1625 }

References armnn::ComputeSplitAxis(), FORWARD_WORKLOAD_VALIDATE_FUNC, ViewsDescriptor::GetNumDimensions(), TensorInfo::GetShape(), armnn::IgnoreUnused(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonSplitterWorkloadValidate(), and armnn::SetValueChecked().

Referenced by armnn::IsLayerTypeSupported().

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo * > &  inputs,
const TensorInfo output,
const StackDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1627 of file NeonLayerSupport.cpp.

1631 {
1633  reasonIfUnsupported,
1634  inputs,
1635  output,
1636  descriptor);
1637 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStackWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo input,
const TensorInfo output,
const StridedSliceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1639 of file NeonLayerSupport.cpp.

1643 {
1645  reasonIfUnsupported,
1646  input,
1647  output,
1648  descriptor);
1649 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStridedSliceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1651 of file NeonLayerSupport.cpp.

1655 {
1657  reasonIfUnsupported,
1658  input0,
1659  input1,
1660  output,
1661  nullptr);
1662 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSubtractionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsTileSupported()

bool IsTileSupported ( const TensorInfo input0,
const TensorInfo output,
const TileDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1664 of file NeonLayerSupport.cpp.

1668 {
1670  reasonIfUnsupported,
1671  input,
1672  output,
1673  descriptor);
1674 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTileWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1676 of file NeonLayerSupport.cpp.

1682 {
1684  reasonIfUnsupported,
1685  input,
1686  output,
1687  descriptor,
1688  weights,
1689  biases);
1690 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeConvolution2dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1692 of file NeonLayerSupport.cpp.

1696 {
1697  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1698 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const UnidirectionalSequenceLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1700 of file NeonLayerSupport.cpp.

1709 {
1710  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1711  outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1712  cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1713  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1714  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1715  output.GetDataType() == armnn::DataType::QAsymmS8)
1716  {
1718  reasonIfUnsupported,
1719  input,
1720  outputStateIn,
1721  cellStateIn,
1722  outputStateOut,
1723  cellStateOut,
1724  output,
1725  descriptor,
1726  paramsInfo);
1727  }
1728  else
1729  {
1731  reasonIfUnsupported,
1732  input,
1733  outputStateIn,
1734  cellStateIn,
1735  outputStateOut,
1736  cellStateOut,
1737  output,
1738  descriptor,
1739  paramsInfo);
1740  }
1741 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by armnn::IsLayerTypeSupported().


The documentation for this class was generated from the following files:
armnn::NeonMinimumWorkloadValidate
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
Definition: NeonMinimumWorkload.cpp:15
armnn::NeonFullyConnectedWorkloadValidate
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFullyConnectedWorkload.cpp:24
armnn::NeonSpaceToBatchNdWorkloadValidate
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: NeonSpaceToBatchNdWorkload.cpp:15
armnn::NeonReverseV2WorkloadValidate
arm_compute::Status NeonReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)
Definition: NeonReverseV2Workload.cpp:14
armnn::NeonTileWorkloadValidate
arm_compute::Status NeonTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: NeonTileWorkload.cpp:14
armnn::NeonConvertFp32ToFp16WorkloadValidate
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp32ToFp16Workload.cpp:21
armnn::NeonAdditionWorkloadValidate
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonAdditionWorkload.cpp:20
armnn::NeonAbsWorkloadValidate
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonAbsWorkload.cpp:17
armnn::NeonMultiplicationWorkloadValidate
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonMultiplicationWorkload.cpp:19
armnn::NeonStackWorkloadValidate
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: NeonStackWorkload.cpp:27
armnn::NeonPooling3dWorkloadValidate
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: NeonPooling3dWorkload.cpp:15
armnn::NeonNegWorkloadValidate
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonNegWorkload.cpp:17
armnn::NeonQLstmWorkloadValidate
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonQLstmWorkload.cpp:243
armnn::NeonGatherWorkloadValidate
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: NeonGatherWorkload.cpp:13
armnn::NeonConvolution3dWorkloadValidate
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution3dWorkload.cpp:24
armnn::NeonSubtractionWorkloadValidate
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonSubtractionWorkload.cpp:22
armnn::NeonInstanceNormalizationWorkloadValidate
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: NeonInstanceNormalizationWorkload.cpp:19
armnn::NeonMeanWorkloadValidate
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: NeonMeanWorkload.cpp:18
armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmFloatWorkload.cpp:510
armnn::NeonMaximumWorkloadValidate
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonMaximumWorkload.cpp:14
armnn::DataType::Float32
@ Float32
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
armnn::NeonResizeWorkloadValidate
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: NeonResizeWorkload.cpp:22
armnn::NeonExpWorkloadValidate
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonExpWorkload.cpp:17
armnn::NeonPermuteWorkloadValidate
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: NeonPermuteWorkload.cpp:15
armnn::NeonLogicalNotWorkloadValidate
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogicalNotWorkload.cpp:19
armnn::NeonBatchMatMulValidate
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchMatMulWorkload.cpp:19
armnn::UnaryOperation::Neg
@ Neg
armnn::DataType::QSymmS16
@ QSymmS16
armnn::NeonConvertFp16ToFp32WorkloadValidate
arm_compute::Status NeonConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp16ToFp32Workload.cpp:19
armnn::NeonLogWorkloadValidate
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogWorkload.cpp:17
armnn::NeonArgMinMaxWorkloadValidate
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: NeonArgMinMaxWorkload.cpp:31
armnn::NeonLogSoftmaxWorkloadValidate
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: NeonLogSoftmaxWorkload.cpp:19
armnn::NeonNormalizationWorkloadValidate
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: NeonNormalizationFloatWorkload.cpp:49
armnn::NeonConstantWorkloadValidate
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
Definition: NeonConstantWorkload.cpp:20
armnn::NeonReduceWorkloadValidate
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: NeonReduceWorkload.cpp:19
armnn::NeonPadWorkloadValidate
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: NeonPadWorkload.cpp:59
armnn::NeonGatherNdWorkloadValidate
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: NeonGatherNdWorkload.cpp:14
armnn::NeonConcatWorkloadValidate
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: NeonConcatWorkload.cpp:27
armnn::NeonLstmFloatWorkloadValidate
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonLstmFloatWorkload.cpp:253
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::NeonDivisionWorkloadValidate
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonDivisionWorkload.cpp:18
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::NeonSqrtWorkloadValidate
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSqrtWorkload.cpp:18
armnn::NeonDepthToSpaceWorkloadValidate
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: NeonDepthToSpaceWorkload.cpp:19
armnn::UnaryOperation::Exp
@ Exp
armnn::NeonTransposeWorkloadValidate
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: NeonTransposeWorkload.cpp:15
armnn::NeonSoftmaxWorkloadValidate
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: NeonSoftmaxWorkload.cpp:19
armnn::NeonSpaceToDepthWorkloadValidate
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: NeonSpaceToDepthWorkload.cpp:19
armnn::UnaryOperation::Sin
@ Sin
armnn::NeonPreluWorkloadValidate
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: NeonPreluWorkload.cpp:17
armnn::NeonDequantizeWorkloadValidate
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonDequantizeWorkload.cpp:22
armnn::NeonLogicalAndWorkloadValidate
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalAndWorkload.cpp:18
armnn::NeonDepthwiseConvolutionWorkloadValidate
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: NeonDepthwiseConvolutionWorkload.cpp:29
armnn::NeonQuantizedLstmWorkloadValidate
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: NeonQuantizedLstmWorkload.cpp:131
armnn::NeonActivationWorkloadValidate
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: NeonActivationWorkload.cpp:17
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::NeonQuantizeWorkloadValidate
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonQuantizeWorkload.cpp:18
armnn::NeonBatchToSpaceNdWorkloadValidate
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: NeonBatchToSpaceNdWorkload.cpp:15
armnn::NeonCastValidate
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonCastWorkload.cpp:19
armnn::UnaryOperation::Log
@ Log
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: NeonLayerSupport.cpp:166
armnn::NeonSplitterWorkloadValidate
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: NeonSplitterWorkload.cpp:33
armnn::NeonFusedWorkloadValidate
arm_compute::Status NeonFusedWorkloadValidate(const std::vector< std::reference_wrapper< TensorInfo >> &inputInfos, const std::vector< std::reference_wrapper< TensorInfo >> &outputInfos, const FusedDescriptor &fusedDescriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFusedWorkload.cpp:22
armnn::NeonRsqrtWorkloadValidate
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonRsqrtWorkload.cpp:18
armnn::NeonUnidirectionalSequenceLstmWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmWorkload.cpp:491
armnn::NeonReshapeWorkloadValidate
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonReshapeWorkload.cpp:17
armnn::NeonConvolution2dWorkloadValidate
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution2dWorkload.cpp:24
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::NeonBatchNormalizationValidate
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchNormalizationWorkload.cpp:24
armnn::NeonSinWorkloadValidate
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSinWorkload.cpp:17
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::NeonStridedSliceWorkloadValidate
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: NeonStridedSliceWorkload.cpp:19
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.
Definition: WorkloadUtils.cpp:377
armnn::UnaryOperation::Abs
@ Abs
armnn::NeonSliceWorkloadValidate
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: NeonSliceWorkload.cpp:21
armnn::NeonPooling2dWorkloadValidate
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: NeonPooling2dWorkload.cpp:22
armnn::IsLayerTypeSupported
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)
Definition: NeonLayerSupport.cpp:184
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
armnn::NeonChannelShuffleValidate
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: NeonChannelShuffleWorkload.cpp:17
armnn::NeonL2NormalizationWorkloadValidate
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: NeonL2NormalizationFloatWorkload.cpp:19
armnn::NeonComparisonWorkloadValidate
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: NeonComparisonWorkload.cpp:16
armnn::NeonTransposeConvolution2dWorkloadValidate
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: NeonTransposeConvolution2dWorkload.cpp:25
armnn::NeonLogicalOrWorkloadValidate
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalOrWorkload.cpp:18