18#if defined(ARMCOMPUTECL_ENABLED)
101template<
unsigned int FilterSize>
102bool IsMatchingSize2d(
const TensorInfo& weightInfo)
105 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
// Base case: a single candidate stride — true iff it equals the actual stride.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}

// Recursive case: true iff the actual stride matches any of the candidate strides.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
120template<
typename ... Args>
124#if defined(ARMCOMPUTECL_ENABLED)
127 if (reasonIfUnsupported)
129 reasonIfUnsupported.value() =
"The armnn library has been built without CL support";
// With CL support built in, evaluate the expression directly; without it,
// fall back to IsClBackendSupported so callers get the standard
// "built without CL support" reason. Relies on a `reasonIfUnsupported`
// variable being in scope at every expansion site.
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
#if defined(ARMCOMPUTECL_ENABLED)
// Runs an ACL workload validation functor and converts its arm_compute::Status
// into the bool + optional-reason convention used by the layer-support API.
// On failure, copies the ACL error description into reasonIfUnsupported
// (when the caller supplied one).
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// Forward a layer-support query to the matching ACL validate function;
// without CL support, report the backend itself as unsupported instead.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
161template<
typename FloatFunc,
typename Uint8Func,
typename ... Params>
162bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
164 FloatFunc floatFuncPtr,
165 Uint8Func uint8FuncPtr,
168 return IsClBackendSupported(reasonIfUnsupported) &&
176 std::forward<Params>(params)...);
181 : m_ModelContextPtr(modelContextPtr)
186 : m_ModelContextPtr(nullptr)
191 const std::vector<TensorInfo>& infos,
203 reasonIfUnsupported);
212 reasonIfUnsupported);
218 reasonIfUnsupported);
228 reasonIfUnsupported);
233 reasonIfUnsupported);
240 reasonIfUnsupported);
246 reasonIfUnsupported);
249 std::vector<const TensorInfo*> inputInfos;
250 for (uint32_t i = 0; i < (infos.size() - 1); i++)
252 inputInfos.push_back(&infos[i]);
255 infos[infos.size() - 1],
257 reasonIfUnsupported);
267 if (infos.size() != 4)
270 "TensorInfos should be of format: {input, output, weights, biases}.");
281 reasonIfUnsupported);
290 reasonIfUnsupported);
295 if (infos.size() != 4)
298 "TensorInfos should be of format: {input, output, weights, biases}.");
309 reasonIfUnsupported);
318 reasonIfUnsupported);
325 reasonIfUnsupported);
328 if (infos.size() != 4)
331 "TensorInfos should be of format: {input, output, weights, biases}.");
342 reasonIfUnsupported);
351 reasonIfUnsupported);
364 switch (desc.m_Operation)
430 reasonIfUnsupported);
435 reasonIfUnsupported);
444 reasonIfUnsupported);
450 reasonIfUnsupported);
455 reasonIfUnsupported);
463 reasonIfUnsupported);
468 reasonIfUnsupported);
474 reasonIfUnsupported);
479 reasonIfUnsupported);
489 lstmParamsInfo.
value(),
490 reasonIfUnsupported);
501 reasonIfUnsupported);
510 reasonIfUnsupported);
523 reasonIfUnsupported);
530 reasonIfUnsupported);
535 reasonIfUnsupported);
540 reasonIfUnsupported);
545 reasonIfUnsupported);
556 lstmParamsInfo.
value(),
557 reasonIfUnsupported);
566 quantizedLstmParamsInfo.
value(),
567 reasonIfUnsupported);
574 reasonIfUnsupported);
579 reasonIfUnsupported);
584 reasonIfUnsupported);
589 reasonIfUnsupported);
596 reasonIfUnsupported);
600 reasonIfUnsupported);
605 reasonIfUnsupported);
610 reasonIfUnsupported);
615 reasonIfUnsupported);
620 reasonIfUnsupported);
623 std::vector<TensorInfo> outputInfos;
624 for (uint32_t i = 1; i < infos.size(); i++)
626 outputInfos.push_back(infos[i]);
629 {outputInfos.begin(), outputInfos.end()},
631 reasonIfUnsupported);
635 std::vector<const TensorInfo*> inputInfos;
636 for (uint32_t i = 0; i < infos.size() - 1; i++)
638 inputInfos.push_back(&infos[i]);
641 infos[infos.size() - 1],
643 reasonIfUnsupported);
649 reasonIfUnsupported);
658 reasonIfUnsupported);
663 reasonIfUnsupported);
666 if (infos.size() != 4)
669 "TensorInfos should be of format: {input, output, weights, biases}.");
680 reasonIfUnsupported);
689 reasonIfUnsupported);
701 lstmParamsInfo.
value(),
702 reasonIfUnsupported);
842 SetValueChecked(reasonIfUnsupported,
"Cl Concat: Concat axis > Number of dimensions.");
847 if(concatInnerAxis < 3)
855 else if (concatInnerAxis == 3)
859 for (
auto& input : inputs)
863 SetValueChecked(reasonIfUnsupported,
"Cl Concat: Types and quantization parameters must match.");
871 SetValueChecked(reasonIfUnsupported,
"Cl Concat: Maximum of 4 dimensions supported.");
911 bool isFastMathEnabled =
false;
912#if defined(ARMCOMPUTECL_ENABLED)
913 if (m_ModelContextPtr)
915 if (m_ModelContextPtr.get() !=
nullptr)
920 isFastMathEnabled = modelOptions->IsFastMathEnabled();
944 bool isFastMathEnabled =
false;
945#if defined(ARMCOMPUTECL_ENABLED)
946 if (m_ModelContextPtr)
948 if (m_ModelContextPtr.get() !=
nullptr)
953 isFastMathEnabled = modelOptions->IsFastMathEnabled();
1000 reasonIfUnsupported,
1017 reasonIfUnsupported,
1033 reasonIfUnsupported,
1049 reasonIfUnsupported,
1054 reasonIfUnsupported,
1059 reasonIfUnsupported,
1064 reasonIfUnsupported,
1069 reasonIfUnsupported,
1074 reasonIfUnsupported,
1079 reasonIfUnsupported,
1084 reasonIfUnsupported,
1101 return IsClBackendSupported(reasonIfUnsupported);
1109 reasonIfUnsupported,
1122 reasonIfUnsupported,
1138 reasonIfUnsupported,
1151 reasonIfUnsupported,
1160 return IsClBackendSupported(reasonIfUnsupported, input);
1169 reasonIfUnsupported,
1181 reasonIfUnsupported,
1199 reasonIfUnsupported,
1205 reasonIfUnsupported,
1221 reasonIfUnsupported,
1239 reasonIfUnsupported,
1257 reasonIfUnsupported,
1269 reasonIfUnsupported,
1281 reasonIfUnsupported,
1293 reasonIfUnsupported,
1311 return IsClBackendSupported(reasonIfUnsupported, output);
1320 reasonIfUnsupported,
1376 reasonIfUnsupported,
1378 previousCellStateIn,
1401 reasonIfUnsupported,
1403 previousCellStateIn,
1415 reasonIfUnsupported,
1426 reasonIfUnsupported,
1455 reasonIfUnsupported,
1469 reasonIfUnsupported,
1499 reasonIfUnsupported,
1511 reasonIfUnsupported,
1518 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1522#if defined(ARMCOMPUTECL_ENABLED)
1532 reasonIfUnsupported,
1535 *splitAxis.begin());
1539 for (
auto output : outputs)
1543 SetValueChecked(reasonIfUnsupported,
"Cl Splitter: Types and quantization parameters must match.");
1556 reasonIfUnsupported,
1568 reasonIfUnsupported,
1580 reasonIfUnsupported,
1593 reasonIfUnsupported,
1607 reasonIfUnsupported,
1634 reasonIfUnsupported,
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_END
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo ¶msInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const
Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement over the layer type.
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo ¶msInfo, Optional< std::string & > reasonIfUnsupported) const
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo > > &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo ¶msInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsScatterNdSupported(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &updates, const TensorInfo &output, const ScatterNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo ¶msInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
bool IsReverseV2Supported(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
const TensorShape & GetShape() const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
DataType GetDataType() const
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::Status ClChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
arm_compute::Status ClUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo ¶msInfo)
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
arm_compute::Status ClBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.
arm_compute::Status ClSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClElementwiseBinaryValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
arm_compute::Status ClTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClScatterNdWorkloadValidate(const TensorInfo &inputInfo, const TensorInfo &indicesInfo, const TensorInfo &updatesInfo, const TensorInfo &outputInfo, const ScatterNdDescriptor &descriptor)
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
@ UnidirectionalSequenceLstm
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo ¶msInfo)
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
SpaceToDepthDescriptor DepthToSpaceDescriptor
A DepthToSpaceDescriptor for the DepthToSpaceLayer.
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
arm_compute::Status ClReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
arm_compute::Status ClCastValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
void SetValueChecked(Optional< T & > optionalRef, V &&val)
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo ¶msInfo)
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo > > &outputs, unsigned int splitAxis)
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo ¶msInfo)
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
bool FalseFunc(Optional< std::string & > reasonIfUnsupported, Params &&... params)
arm_compute::Status ClFloorDivWorkloadValidate(const TensorInfo &input0Info, const TensorInfo &input1Info, const TensorInfo &outputInfo, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
arm_compute::Status ClSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
SoftmaxDescriptor LogSoftmaxDescriptor
A LogSoftmaxDescriptor for the LogSoftmaxLayer.
LstmDescriptor UnidirectionalSequenceLstmDescriptor
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClGatherNdWorkloadValidate(const TensorInfo ¶msInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
arm_compute::Status ClConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
void IgnoreUnused(Ts &&...)
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
An ActivationDescriptor for the ActivationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Base class for all descriptors.
A BatchMatMulDescriptor for the BatchMatMul operator.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
A ComparisonDescriptor for the ComparisonLayer.
A Convolution2dDescriptor for the Convolution2dLayer.
A Convolution3dDescriptor for the Convolution3dLayer.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an Optional.
A FillDescriptor for the FillLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
A GatherDescriptor for the GatherLayer.
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
A L2NormalizationDescriptor for the L2NormalizationLayer.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
An LstmDescriptor for the LstmLayer.
A MeanDescriptor for the MeanLayer.
A NormalizationDescriptor for the NormalizationLayer.
An OriginsDescriptor for the ConcatLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
uint32_t GetNumDimensions() const
Get the number of dimensions.
A PadDescriptor for the PadLayer.
A PermuteDescriptor for the PermuteLayer.
A Pooling2dDescriptor for the Pooling2dLayer.
A Pooling3dDescriptor for the Pooling3dLayer.
A QLstmDescriptor for the QLstmLayer.
A ReduceDescriptor for the REDUCE operators.
A ReshapeDescriptor for the ReshapeLayer.
A ResizeDescriptor for the ResizeLayer.
A ScatterNdDescriptor for the ScatterNdLayer.
A SliceDescriptor for the SliceLayer.
A SoftmaxDescriptor for the SoftmaxLayer.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
A StackDescriptor for the StackLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
A TransposeDescriptor for the TransposeLayer.
A ViewsDescriptor for the SplitterLayer.
uint32_t GetNumDimensions() const
Get the number of dimensions.