ArmNN
 25.02
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
NeonLayerSupport Class Reference

#include <NeonLayerSupport.hpp>

Inheritance diagram for NeonLayerSupport:
[legend]
Collaboration diagram for NeonLayerSupport:
[legend]

Public Member Functions

 NeonLayerSupport (const IBackendInternal::IBackendSpecificModelContextPtr &modelContextPtr)
 
 NeonLayerSupport ()
 
 ~NeonLayerSupport ()
 
bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
 Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported. More...
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsFusedSupported (const std::vector< std::reference_wrapper< TensorInfo >> &inputs, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const FusedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsReverseV2Supported (const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsTileSupported (const TensorInfo &input0, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
 
- Public Member Functions inherited from LayerSupportBase
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 14 of file NeonLayerSupport.hpp.

Constructor & Destructor Documentation

◆ NeonLayerSupport() [1/2]

Definition at line 175 of file NeonLayerSupport.cpp.

176  : m_ModelContextPtr(modelContextPtr)
177 {
178 }

◆ NeonLayerSupport() [2/2]

Definition at line 180 of file NeonLayerSupport.cpp.

181  : m_ModelContextPtr(nullptr)
182 {
183 }

◆ ~NeonLayerSupport()

~NeonLayerSupport ( )
inline

Definition at line 20 of file NeonLayerSupport.hpp.

20 {}

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo input,
const TensorInfo output,
const ActivationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 779 of file NeonLayerSupport.cpp.

783 {
784  IgnoreUnused(descriptor);
786  reasonIfUnsupported,
787  input,
788  output,
789  descriptor);
790 }
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
void IgnoreUnused(Ts &&...)
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonActivationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 792 of file NeonLayerSupport.cpp.

796 {
798  reasonIfUnsupported,
799  input0,
800  input1,
801  output,
802  nullptr);
803 }
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonAdditionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo input,
const TensorInfo output,
const ArgMinMaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 805 of file NeonLayerSupport.cpp.

809 {
811  reasonIfUnsupported,
812  input,
813  output,
814  descriptor);
815 }
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonArgMinMaxWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo inputX,
const TensorInfo inputY,
const TensorInfo output,
const BatchMatMulDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 817 of file NeonLayerSupport.cpp.

822 {
823  bool isFastMathEnabled = false;
824 #if defined(ARMCOMPUTENEON_ENABLED)
825  if (m_ModelContextPtr)
826  {
827  if (m_ModelContextPtr.get() != nullptr)
828  {
829  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
830  if (modelOptions)
831  {
832  isFastMathEnabled = modelOptions->IsFastMathEnabled();
833  }
834  }
835  }
836 #endif
838  reasonIfUnsupported,
839  inputX,
840  inputY,
841  output,
842  descriptor,
843  isFastMathEnabled,
844  nullptr);
845 }
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonBatchMatMulValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo mean,
const TensorInfo var,
const TensorInfo beta,
const TensorInfo gamma,
const BatchNormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 847 of file NeonLayerSupport.cpp.

855 {
857  reasonIfUnsupported,
858  input,
859  output,
860  mean,
861  var,
862  beta,
863  gamma,
864  descriptor,
865  nullptr);
866 }
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchNormalizationValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo input,
const TensorInfo output,
const BatchToSpaceNdDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 868 of file NeonLayerSupport.cpp.

872 {
874  reasonIfUnsupported,
875  input,
876  output,
877  descriptor);
878 }
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchToSpaceNdWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 880 of file NeonLayerSupport.cpp.

883 {
885  reasonIfUnsupported,
886  input,
887  output);
888 }
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonCastValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo input,
const TensorInfo output,
const ChannelShuffleDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 890 of file NeonLayerSupport.cpp.

894 {
896  reasonIfUnsupported,
897  input,
898  output,
899  descriptor);
900 }
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonChannelShuffleValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const ComparisonDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 902 of file NeonLayerSupport.cpp.

907 {
908 
910  reasonIfUnsupported,
911  input0,
912  input1,
913  output,
914  descriptor);
915 }
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonComparisonWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo * >  inputs,
const TensorInfo output,
const OriginsDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 917 of file NeonLayerSupport.cpp.

921 {
922  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
923  {
924  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
925  return false;
926  }
927 
928  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
929  if(concatInnerAxis < 3) // Width, height, or channels
930  {
932  reasonIfUnsupported,
933  inputs,
934  output,
935  descriptor);
936  }
937  else if (concatInnerAxis == 3)
938  {
939  for (auto& input : inputs)
940  {
941  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
942  {
943  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
944  return false;
945  }
946  }
947  return true; // Sub-tensors support concat along batch
948  }
949  else // > 4 dimensions not supported.
950  {
951  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
952  return false;
953  }
954 }
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
void SetValueChecked(Optional< T & > optionalRef, V &&val)

References FORWARD_WORKLOAD_VALIDATE_FUNC, OriginsDescriptor::GetConcatAxis(), OriginsDescriptor::GetNumDimensions(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonConcatWorkloadValidate(), and armnn::SetValueChecked().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 956 of file NeonLayerSupport.cpp.

958 {
960  reasonIfUnsupported,
961  output);
962 }
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConstantWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 964 of file NeonLayerSupport.cpp.

967 {
969  reasonIfUnsupported,
970  input,
971  output);
972 }
arm_compute::Status NeonConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvertFp16ToFp32WorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 974 of file NeonLayerSupport.cpp.

977 {
979  reasonIfUnsupported,
980  input,
981  output);
982 }
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConvertFp32ToFp16WorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 984 of file NeonLayerSupport.cpp.

990 {
991  bool isFastMathEnabled = false;
992 #if defined(ARMCOMPUTENEON_ENABLED)
993  if (m_ModelContextPtr)
994  {
995  if (m_ModelContextPtr.get() != nullptr)
996  {
997  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
998  if (modelOptions)
999  {
1000  isFastMathEnabled = modelOptions->IsFastMathEnabled();
1001  }
1002  }
1003  }
1004 #endif
1005 
1007  reasonIfUnsupported,
1008  input,
1009  output,
1010  descriptor,
1011  weights,
1012  biases,
1013  isFastMathEnabled,
1014  nullptr);
1015 }
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonConvolution2dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution3dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1017 of file NeonLayerSupport.cpp.

1023 {
1024  bool isFastMathEnabled = false;
1025 #if defined(ARMCOMPUTENEON_ENABLED)
1026  if (m_ModelContextPtr)
1027  {
1028  if (m_ModelContextPtr.get() != nullptr)
1029  {
1030  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
1031  if (modelOptions)
1032  {
1033  isFastMathEnabled = modelOptions->IsFastMathEnabled();
1034  }
1035  }
1036  }
1037 #endif
1038 
1040  reasonIfUnsupported,
1041  input,
1042  output,
1043  descriptor,
1044  weights,
1045  biases,
1046  isFastMathEnabled,
1047  nullptr);
1048 }
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonConvolution3dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo input,
const TensorInfo output,
const DepthToSpaceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1050 of file NeonLayerSupport.cpp.

1054 {
1056  reasonIfUnsupported,
1057  input,
1058  output,
1059  descriptor);
1060 }
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthToSpaceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1062 of file NeonLayerSupport.cpp.

1068 {
1070  reasonIfUnsupported,
1071  input,
1072  output,
1073  descriptor,
1074  weights,
1075  biases,
1076  nullptr);
1077 }
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1079 of file NeonLayerSupport.cpp.

1082 {
1084  reasonIfUnsupported,
1085  input,
1086  output);
1087 }
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDequantizeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reason = EmptyOptional() 
) const

Definition at line 1089 of file NeonLayerSupport.cpp.

1095 {
1097  reasonIfUnsupported,
1098  input,
1099  output,
1100  descriptor,
1101  weights,
1102  biases,
1103  nullptr);
1104 }

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1373 of file NeonLayerSupport.cpp.

1377 {
1379  reasonIfUnsupported,
1380  input0,
1381  input1,
1382  output,
1383  nullptr);
1384 }
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDivisionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo input,
const TensorInfo output,
const ElementwiseUnaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1106 of file NeonLayerSupport.cpp.

1110 {
1111  switch(descriptor.m_Operation)
1112  {
1113  case UnaryOperation::Abs:
1115  reasonIfUnsupported,
1116  input,
1117  output);
1118  case UnaryOperation::Exp:
1120  reasonIfUnsupported,
1121  input,
1122  output);
1125  reasonIfUnsupported,
1126  input,
1127  output);
1128  case UnaryOperation::Log:
1130  reasonIfUnsupported,
1131  input,
1132  output);
1133  case UnaryOperation::Neg:
1135  reasonIfUnsupported,
1136  input,
1137  output);
1138  case UnaryOperation::Rsqrt:
1140  reasonIfUnsupported,
1141  input,
1142  output);
1143  case UnaryOperation::Sin:
1145  reasonIfUnsupported,
1146  input,
1147  output);
1148  case UnaryOperation::Sqrt:
1150  reasonIfUnsupported,
1151  input,
1152  output);
1153  default:
1154  return false;
1155  }
1156 }
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)

References armnn::Abs, armnn::Exp, FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::Log, armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::Neg, armnn::NeonAbsWorkloadValidate(), armnn::NeonExpWorkloadValidate(), armnn::NeonLogicalNotWorkloadValidate(), armnn::NeonLogWorkloadValidate(), armnn::NeonNegWorkloadValidate(), armnn::NeonRsqrtWorkloadValidate(), armnn::NeonSinWorkloadValidate(), armnn::NeonSqrtWorkloadValidate(), armnn::Rsqrt, armnn::Sin, and armnn::Sqrt.

Referenced by armnn::IsLayerTypeSupported().

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo input,
const TensorInfo output,
const FillDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1158 of file NeonLayerSupport.cpp.

1162 {
1163  armnn::IgnoreUnused(input);
1164  armnn::IgnoreUnused(output);
1165  armnn::IgnoreUnused(descriptor);
1166 
1167  return IsNeonBackendSupported(reasonIfUnsupported);
1168 }

References armnn::IgnoreUnused().

Referenced by armnn::IsLayerTypeSupported().

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1170 of file NeonLayerSupport.cpp.

1173 {
1174  armnn::IgnoreUnused(output);
1175  return IsNeonBackendSupported(reasonIfUnsupported) &&
1176  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1177  input.GetDataType(),
1178  &FalseFuncF16<>,
1179  &TrueFunc<>,
1180  &FalseFuncU8<>,
1181  &FalseFuncI32<>,
1182  &FalseFuncU8<>);
1183 }
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

References TensorInfo::GetDataType(), armnn::IgnoreUnused(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by armnn::IsLayerTypeSupported().

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo weights,
const TensorInfo biases,
const FullyConnectedDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1185 of file NeonLayerSupport.cpp.

1191 {
1193  reasonIfUnsupported,
1194  input,
1195  output,
1196  weights,
1197  biases,
1198  descriptor,
1199  nullptr);
1200 }
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFullyConnectedWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsFusedSupported()

bool IsFusedSupported ( const std::vector< std::reference_wrapper< TensorInfo >> &  inputs,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const FusedDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1202 of file NeonLayerSupport.cpp.

1206 {
1208  reasonIfUnsupported,
1209  inputs,
1210  outputs,
1211  descriptor,
1212  nullptr);
1213 }
arm_compute::Status NeonFusedWorkloadValidate(const std::vector< std::reference_wrapper< TensorInfo >> &inputInfos, const std::vector< std::reference_wrapper< TensorInfo >> &outputInfos, const FusedDescriptor &fusedDescriptor, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFusedWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1229 of file NeonLayerSupport.cpp.

1233 {
1235  reasonIfUnsupported,
1236  input0,
1237  input1,
1238  output);
1239 }
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherNdWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1215 of file NeonLayerSupport.cpp.

1220 {
1222  reasonIfUnsupported,
1223  input0,
1224  input1,
1225  output,
1226  descriptor);
1227 }
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1241 of file NeonLayerSupport.cpp.

1243 {
1244  return IsNeonBackendSupported(reasonIfUnsupported, input);
1245 }

Referenced by armnn::IsLayerTypeSupported().

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1247 of file NeonLayerSupport.cpp.

1251 {
1253  reasonIfUnsupported,
1254  input,
1255  output,
1256  descriptor);
1257 }
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonInstanceNormalizationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1259 of file NeonLayerSupport.cpp.

1263 {
1264  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1265 }
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonL2NormalizationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmParamsInfo,
Optional< std::string & >  reasonIfUnsupported 
) const
overridevirtual

Default implementation of the ILayerSupport interface, Backends should implement this as a switch statement for each of their LayerTypes calling their specific backend implementation of IsXXXLayerSupported.

Reimplemented from LayerSupportBase.

Definition at line 731 of file NeonLayerSupport.cpp.

737 {
738  bool isSupported = IsLayerTypeSupported(type,
739  infos,
740  descriptor,
741  lstmParamsInfo,
742  quantizedLstmParamsInfo,
743  reasonIfUnsupported,
744  *this);
745 
746  // For android-nn-driver to run FP16 operations on CpuAcc we need at least v8.2
747  // architecture. If the available architecture is older than v8.2, we can check if the operator is
748  // supported by changing operator inputs & outputs to be FP32.
749  // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
750  // to the parsers if the operator can be supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
751  // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
752  // and insert convert layers around the FP32 operator.
753  if (reasonIfUnsupported.has_value())
754  {
755  std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
756  if (!isSupported
757  && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
758  {
759  std::vector<TensorInfo> newInfos;
760  for (auto info: infos)
761  {
762  newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
763  }
764 
765  std::string tmpString;
766  return IsLayerTypeSupported(type,
767  newInfos,
768  descriptor,
769  lstmParamsInfo,
770  quantizedLstmParamsInfo,
771  tmpString,
772  *this);
773  }
774  }
775 
776  return isSupported;
777 }
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)

References armnn::Float32, OptionalBase::has_value(), armnn::info, armnn::IsLayerTypeSupported(), and OptionalReferenceSwitch< IsReference, T >::value().

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1267 of file NeonLayerSupport.cpp.

1272 {
1273  switch(descriptor.m_Operation)
1274  {
1277  reasonIfUnsupported,
1278  input0,
1279  input1,
1280  output);
1283  reasonIfUnsupported,
1284  input0,
1285  input1,
1286  output);
1287  default:
1288  return false;
1289  }
1290 }
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::LogicalAnd, armnn::LogicalOr, LogicalBinaryDescriptor::m_Operation, armnn::NeonLogicalAndWorkloadValidate(), and armnn::NeonLogicalOrWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1292 of file NeonLayerSupport.cpp.

1296 {
1297  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1298 }
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLogSoftmaxWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1300 of file NeonLayerSupport.cpp.

1310 {
1312  reasonIfUnsupported,
1313  input,
1314  outputStateIn,
1315  cellStateIn,
1316  scratchBuffer,
1317  outputStateOut,
1318  cellStateOut,
1319  output,
1320  descriptor,
1321  paramsInfo);
1322 }
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLstmFloatWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1324 of file NeonLayerSupport.cpp.

1328 {
1330  reasonIfUnsupported,
1331  input0,
1332  input1,
1333  output);
1334 }
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMaximumWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1336 of file NeonLayerSupport.cpp.

1340 {
1342  reasonIfUnsupported,
1343  input,
1344  output,
1345  descriptor);
1346 }
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMeanWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1348 of file NeonLayerSupport.cpp.

1352 {
1354  reasonIfUnsupported,
1355  input0,
1356  input1,
1357  output);
1358 }
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validation function for the inputs and output.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMinimumWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1360 of file NeonLayerSupport.cpp.

1364 {
1366  reasonIfUnsupported,
1367  input0,
1368  input1,
1369  output,
1370  nullptr);
1371 }
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMultiplicationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const NormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1386 of file NeonLayerSupport.cpp.

1390 {
1392  reasonIfUnsupported,
1393  input,
1394  output,
1395  descriptor);
1396 }
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonNormalizationWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1398 of file NeonLayerSupport.cpp.

1400 {
1401  return IsNeonBackendSupported(reasonIfUnsupported, output);
1402 }

Referenced by armnn::IsLayerTypeSupported().

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo input,
const TensorInfo output,
const PadDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1404 of file NeonLayerSupport.cpp.

1408 {
1410  reasonIfUnsupported,
1411  input,
1412  output,
1413  descriptor);
1414 }
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPadWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo input,
const TensorInfo output,
const PermuteDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1416 of file NeonLayerSupport.cpp.

1420 {
1421  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1422 }
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPermuteWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling2dDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1424 of file NeonLayerSupport.cpp.

1428 {
1429  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1430 }
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling2dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling3dDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1432 of file NeonLayerSupport.cpp.

1436 {
1437  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1438 }
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling3dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo input,
const TensorInfo alpha,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1440 of file NeonLayerSupport.cpp.

1444 {
1445  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1446 }
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPreluWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo input,
const TensorInfo previousOutputIn,
const TensorInfo previousCellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const QLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1448 of file NeonLayerSupport.cpp.

1457 {
1458  // Check required here in order to pass IsLayerSupported for datatypes tests
1459  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1460  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1461  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1462  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1463  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1464  output.GetDataType() == armnn::DataType::QAsymmS8)
1465  {
1467  reasonIfUnsupported,
1468  input,
1469  previousCellStateIn,
1470  previousOutputIn,
1471  cellStateOut,
1472  outputStateOut,
1473  output,
1474  descriptor,
1475  paramsInfo);
1476  }
1477  else
1478  {
1479  return false;
1480  }
1481 }
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonQLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by armnn::IsLayerTypeSupported().

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported ( const TensorInfo input,
const TensorInfo cellStateIn,
const TensorInfo outputStateIn,
const TensorInfo cellStateOut,
const TensorInfo outputStateOut,
const QuantizedLstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1493 of file NeonLayerSupport.cpp.

1500 {
1502  reasonIfUnsupported,
1503  input,
1504  cellStateIn,
1505  outputStateIn,
1506  cellStateOut,
1507  outputStateOut,
1508  paramsInfo);
1509 }
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizedLstmWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1483 of file NeonLayerSupport.cpp.

1486 {
1488  reasonIfUnsupported,
1489  input,
1490  output);
1491 }
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo input,
const TensorInfo output,
const ReduceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1511 of file NeonLayerSupport.cpp.

1515 {
1517  reasonIfUnsupported,
1518  input,
1519  output,
1520  descriptor);
1521 }
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonReduceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo input,
const TensorInfo output,
const ReshapeDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1523 of file NeonLayerSupport.cpp.

1527 {
1528  armnn::IgnoreUnused(descriptor);
1530  reasonIfUnsupported,
1531  input,
1532  output);
1533 }
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonReshapeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo input,
const TensorInfo output,
const ResizeDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1535 of file NeonLayerSupport.cpp.

1539 {
1541  reasonIfUnsupported,
1542  input,
1543  output,
1544  descriptor);
1545 }
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonResizeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsReverseV2Supported()

bool IsReverseV2Supported ( const TensorInfo input,
const TensorInfo axis,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported 
) const

Definition at line 1547 of file NeonLayerSupport.cpp.

1551 {
1553  reasonIfUnsupported,
1554  input,
1555  axis,
1556  output);
1557 }
arm_compute::Status NeonReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonReverseV2WorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo input,
const TensorInfo output,
const SliceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1559 of file NeonLayerSupport.cpp.

1563 {
1565  reasonIfUnsupported,
1566  input,
1567  output,
1568  descriptor);
1569 }
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSliceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const SoftmaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1571 of file NeonLayerSupport.cpp.

1575 {
1576  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1577 }
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSoftmaxWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToBatchNdDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1579 of file NeonLayerSupport.cpp.

1583 {
1585  reasonIfUnsupported,
1586  input,
1587  output,
1588  descriptor);
1589 }
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToBatchNdWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToDepthDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1591 of file NeonLayerSupport.cpp.

1595 {
1597  reasonIfUnsupported,
1598  input,
1599  output,
1600  descriptor);
1601 }
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToDepthWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1603 of file NeonLayerSupport.cpp.

1607 {
1608 #if defined(ARMCOMPUTENEON_ENABLED)
1609  // Split along the last dimension, cannot use sub-tensors
1610  // as width and height of the sub-tensors do not match
1611  // the width and height of the parent tensor
1612  // in case of input with more than 2D.
1613  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1614  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1615  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1616  {
1618  reasonIfUnsupported,
1619  input,
1620  outputs,
1621  *splitAxis.begin());
1622  }
1623 #endif
1624  IgnoreUnused(descriptor);
1625  for (auto output : outputs)
1626  {
1627  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1628  {
1629  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1630  return false;
1631  }
1632  }
1633  return true;
1634 }
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)

References armnn::ComputeSplitAxis(), FORWARD_WORKLOAD_VALIDATE_FUNC, ViewsDescriptor::GetNumDimensions(), TensorInfo::GetShape(), armnn::IgnoreUnused(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonSplitterWorkloadValidate(), and armnn::SetValueChecked().

Referenced by armnn::IsLayerTypeSupported().

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo * > &  inputs,
const TensorInfo output,
const StackDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1636 of file NeonLayerSupport.cpp.

1640 {
1642  reasonIfUnsupported,
1643  inputs,
1644  output,
1645  descriptor);
1646 }
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStackWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo input,
const TensorInfo output,
const StridedSliceDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1648 of file NeonLayerSupport.cpp.

1652 {
1654  reasonIfUnsupported,
1655  input,
1656  output,
1657  descriptor);
1658 }
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStridedSliceWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 

Definition at line 1660 of file NeonLayerSupport.cpp.

1664 {
1666  reasonIfUnsupported,
1667  input0,
1668  input1,
1669  output,
1670  nullptr);
1671 }
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSubtractionWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsTileSupported()

bool IsTileSupported ( const TensorInfo &  input0,
const TensorInfo &  output,
const TileDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 

Definition at line 1673 of file NeonLayerSupport.cpp.

1677 {
1679  reasonIfUnsupported,
1680  input,
1681  output,
1682  descriptor);
1683 }
arm_compute::Status NeonTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTileWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 

Definition at line 1685 of file NeonLayerSupport.cpp.

1691 {
1693  reasonIfUnsupported,
1694  input,
1695  output,
1696  descriptor,
1697  weights,
1698  biases);
1699 }
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeConvolution2dWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 

Definition at line 1701 of file NeonLayerSupport.cpp.

1705 {
1706  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1707 }
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeWorkloadValidate().

Referenced by armnn::IsLayerTypeSupported().

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo &  input,
const TensorInfo &  outputStateIn,
const TensorInfo &  cellStateIn,
const TensorInfo &  outputStateOut,
const TensorInfo &  cellStateOut,
const TensorInfo &  output,
const UnidirectionalSequenceLstmDescriptor &  descriptor,
const LstmInputParamsInfo &  paramsInfo,
Optional< std::string & >  reasonIfUnsupported 

Definition at line 1709 of file NeonLayerSupport.cpp.

1718 {
1719  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1720  outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1721  cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1722  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1723  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1724  output.GetDataType() == armnn::DataType::QAsymmS8)
1725  {
1727  reasonIfUnsupported,
1728  input,
1729  outputStateIn,
1730  cellStateIn,
1731  outputStateOut,
1732  cellStateOut,
1733  output,
1734  descriptor,
1735  paramsInfo);
1736  }
1737  else
1738  {
1740  reasonIfUnsupported,
1741  input,
1742  outputStateIn,
1743  cellStateIn,
1744  outputStateOut,
1745  cellStateOut,
1746  output,
1747  descriptor,
1748  paramsInfo);
1749  }
1750 }
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by armnn::IsLayerTypeSupported().


The documentation for this class was generated from the following files: