ArmNN 24.11
ClLayerSupport.cpp
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ClLayerSupport.hpp"
7 #include "ClBackendId.hpp"
8 #include "ClBackendModelContext.hpp"
9 
10 #include <armnn/BackendRegistry.hpp>
11 
12 #include <InternalTypes.hpp>
13 #include <LayerSupportCommon.hpp>
14 
15 #include <armnn/utility/IgnoreUnused.hpp>
16 #include <armnn/utility/PolymorphicDowncast.hpp>
17 
18 #if defined(ARMCOMPUTECL_ENABLED)
92 #endif
93 
94 
95 namespace armnn
96 {
97 
98 namespace
99 {
100 
101 template<unsigned int FilterSize>
102 bool IsMatchingSize2d(const TensorInfo& weightInfo)
103 {
104  // Width & Height must match.
105  return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
106 }
107 
108 template<uint32_t ValidStride>
109 bool IsMatchingStride(uint32_t actualStride)
110 {
111  return ValidStride == actualStride;
112 }
113 
114 template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
115 bool IsMatchingStride(uint32_t actualStride)
116 {
117  return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
118 }
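The variadic overload above peels strides off the template parameter pack one at a time, so a call site can test a runtime stride against any fixed set of allowed values in a single expression. A minimal illustrative sketch (the variable name is hypothetical, not from this file):

    uint32_t strideX = 2;
    // Expands to (strideX == 1) || (strideX == 2) || (strideX == 3)
    bool strideOk = IsMatchingStride<1, 2, 3>(strideX);
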
119 
120 template<typename ... Args>
121 bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
122 {
123  IgnoreUnused(reasonIfUnsupported, (args)...);
124 #if defined(ARMCOMPUTECL_ENABLED)
125  return true;
126 #else
127  if (reasonIfUnsupported)
128  {
129  reasonIfUnsupported.value() = "The armnn library has been built without CL support";
130  }
131  return false;
132 #endif
133 }
134 
135 #if defined(ARMCOMPUTECL_ENABLED)
136 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
137 #else
138 #define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
139 #endif
140 
141 #if defined(ARMCOMPUTECL_ENABLED)
142 template<class FuncType, class... Args>
143 inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
144 {
145  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
146  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
147  if (!supported && reasonIfUnsupported)
148  {
149  reasonIfUnsupported.value() = aclStatus.error_description();
150  }
151  return supported;
152 }
153 
154 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
155  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
156 #else
157 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
158  return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
159 #endif
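With ARMCOMPUTECL_ENABLED defined, FORWARD_WORKLOAD_VALIDATE_FUNC simply returns the result of IsWorkloadSupported, which converts an arm_compute::Status into a bool and copies the error description into reasonIfUnsupported. A hand-expanded sketch of a typical call, using ClFloorWorkloadValidate as the example validator:

    // FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate, reasonIfUnsupported, input, output);
    // expands, in the CL-enabled build, to:
    return IsWorkloadSupported(ClFloorWorkloadValidate, reasonIfUnsupported, input, output);
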
160 
161 template<typename FloatFunc, typename Uint8Func, typename ... Params>
162 bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
163  DataType dataType,
164  FloatFunc floatFuncPtr,
165  Uint8Func uint8FuncPtr,
166  Params&&... params)
167 {
168  return IsClBackendSupported(reasonIfUnsupported) &&
169  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
170  dataType,
171  floatFuncPtr,
172  floatFuncPtr,
173  uint8FuncPtr,
174  &FalseFunc<>,
175  &FalseFunc<>,
176  std::forward<Params>(params)...);
177 }
178 } // anonymous namespace
179 
180 ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
181  : m_ModelContextPtr(modelContextPtr)
182 {
183 }
184 
185 ClLayerSupport::ClLayerSupport()
186  : m_ModelContextPtr(nullptr)
187 {
188 }
189 
190 bool ClLayerSupport::IsLayerSupported(const LayerType& type,
191  const std::vector<TensorInfo>& infos,
192  const BaseDescriptor& descriptor,
193  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
194  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
195  Optional<std::string&> reasonIfUnsupported) const
196 {
197  switch (type)
198  {
199  case LayerType::Activation:
200  return IsActivationSupported(infos[0],
201  infos[1],
202  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
203  reasonIfUnsupported);
204  case LayerType::Addition:
205  ARMNN_NO_DEPRECATE_WARN_BEGIN
206  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
207  ARMNN_NO_DEPRECATE_WARN_END
208  case LayerType::ArgMinMax:
209  return IsArgMinMaxSupported(infos[0],
210  infos[1],
211  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
212  reasonIfUnsupported);
213  case LayerType::BatchMatMul:
214  return IsBatchMatMulSupported(infos[0],
215  infos[1],
216  infos[2],
217  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
218  reasonIfUnsupported);
219  case LayerType::BatchNormalization:
220  return IsBatchNormalizationSupported(infos[0],
221  infos[1],
222  infos[2],
223  infos[3],
224  infos[4],
225  infos[5],
226  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
227  (&descriptor)),
228  reasonIfUnsupported);
229  case LayerType::BatchToSpaceNd:
230  return IsBatchToSpaceNdSupported(infos[0],
231  infos[1],
232  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
233  reasonIfUnsupported);
234  case LayerType::Cast:
235  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
236  case LayerType::ChannelShuffle:
237  return IsChannelShuffleSupported(infos[0],
238  infos[1],
239  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
240  reasonIfUnsupported);
241  case LayerType::Comparison:
242  return IsComparisonSupported(infos[0],
243  infos[1],
244  infos[2],
245  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
246  reasonIfUnsupported);
247  case LayerType::Concat:
248  {
249  std::vector<const TensorInfo*> inputInfos;
250  for (uint32_t i = 0; i < (infos.size() - 1); i++)
251  {
252  inputInfos.push_back(&infos[i]);
253  }
254  return IsConcatSupported(inputInfos,
255  infos[infos.size() - 1],
256  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
257  reasonIfUnsupported);
258  }
259  case LayerType::Constant:
260  return IsConstantSupported(infos[0], reasonIfUnsupported);
261  case LayerType::ConvertFp16ToFp32:
262  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
263  case LayerType::ConvertFp32ToFp16:
264  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
265  case LayerType::Convolution2d:
266  {
267  if (infos.size() != 4)
268  {
269  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
270  "TensorInfos should be of format: {input, output, weights, biases}.");
271  }
272 
273  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
274  if (infos[3] == TensorInfo())
275  {
276  return IsConvolution2dSupported(infos[0],
277  infos[1],
278  desc,
279  infos[2],
280  EmptyOptional(),
281  reasonIfUnsupported);
282  }
283  else
284  {
285  return IsConvolution2dSupported(infos[0],
286  infos[1],
287  desc,
288  infos[2],
289  infos[3],
290  reasonIfUnsupported);
291  }
292  }
293  case LayerType::Convolution3d:
294  {
295  if (infos.size() != 4)
296  {
297  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
298  "TensorInfos should be of format: {input, output, weights, biases}.");
299  }
300 
301  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
302  if (infos[3] == TensorInfo())
303  {
304  return IsConvolution3dSupported(infos[0],
305  infos[1],
306  desc,
307  infos[2],
308  EmptyOptional(),
309  reasonIfUnsupported);
310  }
311  else
312  {
313  return IsConvolution3dSupported(infos[0],
314  infos[1],
315  desc,
316  infos[2],
317  infos[3],
318  reasonIfUnsupported);
319  }
320  }
321  case LayerType::DepthToSpace:
322  return IsDepthToSpaceSupported(infos[0],
323  infos[1],
324  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
325  reasonIfUnsupported);
326  case LayerType::DepthwiseConvolution2d:
327  {
328  if (infos.size() != 4)
329  {
330  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
331  "TensorInfos should be of format: {input, output, weights, biases}.");
332  }
333 
334  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
335  if (infos[3] == TensorInfo())
336  {
337  return IsDepthwiseConvolutionSupported(infos[0],
338  infos[1],
339  desc,
340  infos[2],
341  EmptyOptional(),
342  reasonIfUnsupported);
343  }
344  else
345  {
346  return IsDepthwiseConvolutionSupported(infos[0],
347  infos[1],
348  desc,
349  infos[2],
350  infos[3],
351  reasonIfUnsupported);
352  }
353  }
354  case LayerType::Dequantize:
355  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
356  case LayerType::Division:
357  ARMNN_NO_DEPRECATE_WARN_BEGIN
358  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
359  ARMNN_NO_DEPRECATE_WARN_END
360  case LayerType::ElementwiseBinary:
361  {
362  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
363 
364  switch (desc.m_Operation)
365  {
366  case BinaryOperation::Add:
367  FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
368  reasonIfUnsupported,
369  infos[0],
370  infos[1],
371  infos[2],
372  nullptr);
373  case BinaryOperation::Div:
374  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
375  reasonIfUnsupported,
376  infos[0],
377  infos[1],
378  infos[2],
379  nullptr);
380  case BinaryOperation::FloorDiv:
381  FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorDivWorkloadValidate,
382  reasonIfUnsupported,
383  infos[0],
384  infos[1],
385  infos[2],
386  nullptr);
387  case BinaryOperation::Maximum:
388  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
389  reasonIfUnsupported,
390  infos[0],
391  infos[1],
392  infos[2]);
393  case BinaryOperation::Minimum:
394  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
395  reasonIfUnsupported,
396  infos[0],
397  infos[1],
398  infos[2]);
399  case BinaryOperation::Mul:
400  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
401  reasonIfUnsupported,
402  infos[0],
403  infos[1],
404  infos[2],
405  nullptr);
406  case BinaryOperation::Power:
407  case BinaryOperation::SqDiff:
408  FORWARD_WORKLOAD_VALIDATE_FUNC(ClElementwiseBinaryValidate,
409  reasonIfUnsupported,
410  infos[0],
411  infos[1],
412  infos[2],
413  desc,
414  nullptr);
415  case BinaryOperation::Sub:
416  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
417  reasonIfUnsupported,
418  infos[0],
419  infos[1],
420  infos[2],
421  nullptr);
422  default:
423  return false;
424  }
425  }
426  case LayerType::ElementwiseUnary:
427  return IsElementwiseUnarySupported(infos[0],
428  infos[1],
429  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
430  reasonIfUnsupported);
431  case LayerType::Fill:
432  return IsFillSupported(infos[0],
433  infos[1],
434  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
435  reasonIfUnsupported);
436  case LayerType::Floor:
437  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
438  case LayerType::FullyConnected:
439  return IsFullyConnectedSupported(infos[0],
440  infos[1],
441  infos[2],
442  infos[3],
443  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
444  reasonIfUnsupported);
445  case LayerType::Gather:
446  return IsGatherSupported(infos[0],
447  infos[1],
448  infos[2],
449  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
450  reasonIfUnsupported);
451  case LayerType::GatherNd:
452  return IsGatherNdSupported(infos[0],
453  infos[1],
454  infos[2],
455  reasonIfUnsupported);
456  case LayerType::Input:
457  return IsInputSupported(infos[0], reasonIfUnsupported);
458  case LayerType::InstanceNormalization:
459  return IsInstanceNormalizationSupported(infos[0],
460  infos[1],
461  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
462  (&descriptor)),
463  reasonIfUnsupported);
464  case LayerType::L2Normalization:
465  return IsL2NormalizationSupported(infos[0],
466  infos[1],
467  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
468  reasonIfUnsupported);
469  case LayerType::LogicalBinary:
470  return IsLogicalBinarySupported(infos[0],
471  infos[1],
472  infos[2],
473  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
474  reasonIfUnsupported);
475  case LayerType::LogSoftmax:
476  return IsLogSoftmaxSupported(infos[0],
477  infos[1],
478  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
479  reasonIfUnsupported);
480  case LayerType::Lstm:
481  return IsLstmSupported(infos[0],
482  infos[1],
483  infos[2],
484  infos[3],
485  infos[4],
486  infos[5],
487  infos[6],
488  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
489  lstmParamsInfo.value(),
490  reasonIfUnsupported);
491  case LayerType::Map:
492  return true;
493  case LayerType::MemCopy:
494  return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
495  case LayerType::MemImport:
496  return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
497  case LayerType::Merge:
498  return LayerSupportBase::IsMergeSupported(infos[0],
499  infos[1],
500  infos[2],
501  reasonIfUnsupported);
502  case LayerType::Maximum:
503  ARMNN_NO_DEPRECATE_WARN_BEGIN
504  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
505  ARMNN_NO_DEPRECATE_WARN_END
506  case LayerType::Mean:
507  return IsMeanSupported(infos[0],
508  infos[1],
509  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
510  reasonIfUnsupported);
511  case LayerType::Minimum:
512  ARMNN_NO_DEPRECATE_WARN_BEGIN
513  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
514  ARMNN_NO_DEPRECATE_WARN_END
515  case LayerType::Multiplication:
516  ARMNN_NO_DEPRECATE_WARN_BEGIN
517  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
518  ARMNN_NO_DEPRECATE_WARN_END
519  case LayerType::Normalization:
520  return IsNormalizationSupported(infos[0],
521  infos[1],
522  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
523  reasonIfUnsupported);
524  case LayerType::Output:
525  return IsOutputSupported(infos[0], reasonIfUnsupported);
526  case LayerType::Pad:
527  return IsPadSupported(infos[0],
528  infos[1],
529  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
530  reasonIfUnsupported);
531  case LayerType::Permute:
532  return IsPermuteSupported(infos[0],
533  infos[1],
534  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
535  reasonIfUnsupported);
536  case LayerType::Pooling2d:
537  return IsPooling2dSupported(infos[0],
538  infos[1],
539  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
540  reasonIfUnsupported);
541  case LayerType::Pooling3d:
542  return IsPooling3dSupported(infos[0],
543  infos[1],
544  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
545  reasonIfUnsupported);
546  case LayerType::Prelu:
547  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
548  case LayerType::QLstm:
549  return IsQLstmSupported(infos[0],
550  infos[1],
551  infos[2],
552  infos[3],
553  infos[4],
554  infos[5],
555  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
556  lstmParamsInfo.value(),
557  reasonIfUnsupported);
558  case LayerType::Quantize:
559  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
560  case LayerType::QuantizedLstm:
561  return IsQuantizedLstmSupported(infos[0],
562  infos[1],
563  infos[2],
564  infos[3],
565  infos[4],
566  quantizedLstmParamsInfo.value(),
567  reasonIfUnsupported);
568  case LayerType::Rank:
569  return true;
570  case LayerType::Reduce:
571  return IsReduceSupported(infos[0],
572  infos[1],
573  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
574  reasonIfUnsupported);
575  case LayerType::Reshape:
576  return IsReshapeSupported(infos[0],
577  infos[1],
578  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
579  reasonIfUnsupported);
580  case LayerType::Resize:
581  return IsResizeSupported(infos[0],
582  infos[1],
583  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
584  reasonIfUnsupported);
585  case LayerType::ReverseV2:
586  return IsReverseV2Supported(infos[0],
587  infos[1],
588  infos[2],
589  reasonIfUnsupported);
590  case LayerType::ScatterNd:
591  return IsScatterNdSupported(infos[0], // input/shape
592  infos[1], // indices
593  infos[2], // updates
594  infos[3], // output
595  *(PolymorphicDowncast<const ScatterNdDescriptor*>(&descriptor)),
596  reasonIfUnsupported);
597  case LayerType::Shape:
598  return LayerSupportBase::IsShapeSupported(infos[0],
599  infos[1],
600  reasonIfUnsupported);
601  case LayerType::Slice:
602  return IsSliceSupported(infos[0],
603  infos[1],
604  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
605  reasonIfUnsupported);
606  case LayerType::Softmax:
607  return IsSoftmaxSupported(infos[0],
608  infos[1],
609  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
610  reasonIfUnsupported);
611  case LayerType::SpaceToBatchNd:
612  return IsSpaceToBatchNdSupported(infos[0],
613  infos[1],
614  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
615  reasonIfUnsupported);
616  case LayerType::SpaceToDepth:
617  return IsSpaceToDepthSupported(infos[0],
618  infos[1],
619  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
620  reasonIfUnsupported);
621  case LayerType::Splitter:
622  {
623  std::vector<TensorInfo> outputInfos;
624  for (uint32_t i = 1; i < infos.size(); i++)
625  {
626  outputInfos.push_back(infos[i]);
627  }
628  return IsSplitterSupported(infos[0],
629  {outputInfos.begin(), outputInfos.end()},
630  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
631  reasonIfUnsupported);
632  }
633  case LayerType::Stack:
634  {
635  std::vector<const TensorInfo*> inputInfos;
636  for (uint32_t i = 0; i < infos.size() - 1; i++)
637  {
638  inputInfos.push_back(&infos[i]);
639  }
640  return IsStackSupported(inputInfos,
641  infos[infos.size() - 1],
642  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
643  reasonIfUnsupported);
644  }
645  case LayerType::StridedSlice:
646  return IsStridedSliceSupported(infos[0],
647  infos[1],
648  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
649  reasonIfUnsupported);
650  case LayerType::Subtraction:
651  ARMNN_NO_DEPRECATE_WARN_BEGIN
652  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
653  ARMNN_NO_DEPRECATE_WARN_END
654  case LayerType::Tile:
655  return IsTileSupported(infos[0],
656  infos[1],
657  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
658  reasonIfUnsupported);
659  case LayerType::Transpose:
660  return IsTransposeSupported(infos[0],
661  infos[1],
662  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
663  reasonIfUnsupported);
664  case LayerType::TransposeConvolution2d:
665  {
666  if (infos.size() != 4)
667  {
668  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
669  "TensorInfos should be of format: {input, output, weights, biases}.");
670  }
671 
672  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
673  if (infos[3] == TensorInfo())
674  {
675  return IsTransposeConvolution2dSupported(infos[0],
676  infos[1],
677  desc,
678  infos[2],
679  EmptyOptional(),
680  reasonIfUnsupported);
681  }
682  else
683  {
684  return IsTransposeConvolution2dSupported(infos[0],
685  infos[1],
686  desc,
687  infos[2],
688  infos[3],
689  reasonIfUnsupported);
690  }
691  }
692  case LayerType::UnidirectionalSequenceLstm:
693  return IsUnidirectionalSequenceLstmSupported(infos[0],
694  infos[1],
695  infos[2],
696  infos[3],
697  infos[4],
698  infos[5],
699  *(PolymorphicDowncast<const
700  UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
701  lstmParamsInfo.value(),
702  reasonIfUnsupported);
703  case LayerType::Unmap:
704  return true;
705  default:
706  // layers not supported in cl by default:
707  // debug, detectionpostprocess, fakequantization,
708  // precompiled, standin, switch, pooling3d, fused
709  return false;
710  }
711 }
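Callers pack a layer's tensor infos into a single vector, inputs first and outputs last (with weights and biases appended for convolution-style layers), and pass the descriptor matching the layer type. A usage sketch for an Activation query; the shape and data type are made up for illustration:

    armnn::ClLayerSupport layerSupport;
    armnn::TensorInfo inOut({1, 8, 8, 16}, armnn::DataType::Float32);
    armnn::ActivationDescriptor act;
    act.m_Function = armnn::ActivationFunction::ReLu;
    std::string reason;
    bool supported = layerSupport.IsLayerSupported(armnn::LayerType::Activation,
                                                   {inOut, inOut}, // {input, output}
                                                   act,
                                                   armnn::EmptyOptional(),
                                                   armnn::EmptyOptional(),
                                                   armnn::Optional<std::string&>(reason));
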
712 
713 bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
714  const TensorInfo& output,
715  const ActivationDescriptor& descriptor,
716  Optional<std::string&> reasonIfUnsupported) const
717 {
718  FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
719  reasonIfUnsupported,
720  input,
721  output,
722  descriptor);
723 }
724 
725 bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
726  const TensorInfo& input1,
727  const TensorInfo& output,
728  Optional<std::string&> reasonIfUnsupported) const
729 {
730  FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
731  reasonIfUnsupported,
732  input0,
733  input1,
734  output,
735  nullptr);
736 }
737 
738 bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
739  const TensorInfo& output,
740  const ArgMinMaxDescriptor& descriptor,
741  Optional<std::string&> reasonIfUnsupported) const
742 {
743 
744  FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
745  reasonIfUnsupported,
746  input,
747  output,
748  descriptor);
749 }
750 
751 bool ClLayerSupport::IsBatchMatMulSupported(const TensorInfo& inputX,
752  const TensorInfo& inputY,
753  const TensorInfo& output,
754  const BatchMatMulDescriptor& descriptor,
755  Optional<std::string&> reasonIfUnsupported) const
756 {
757  FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchMatMulValidate,
758  reasonIfUnsupported,
759  inputX,
760  inputY,
761  output,
762  descriptor,
763  nullptr);
764 }
765 
766 bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
767  const TensorInfo& output,
768  const TensorInfo& mean,
769  const TensorInfo& var,
770  const TensorInfo& beta,
771  const TensorInfo& gamma,
772  const BatchNormalizationDescriptor& descriptor,
773  Optional<std::string&> reasonIfUnsupported) const
774 {
775  FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
776  reasonIfUnsupported,
777  input,
778  output,
779  mean,
780  var,
781  beta,
782  gamma,
783  descriptor,
784  nullptr);
785 }
786 
787 bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
788  const TensorInfo& output,
789  const BatchToSpaceNdDescriptor& descriptor,
790  Optional<std::string&> reasonIfUnsupported) const
791 {
792  FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
793  reasonIfUnsupported,
794  input,
795  output,
796  descriptor);
797 }
798 
799 bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
800  const TensorInfo& output,
801  Optional<std::string&> reasonIfUnsupported) const
802 {
803  FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
804  reasonIfUnsupported,
805  input,
806  output);
807 }
808 
809 bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
810  const TensorInfo& output,
811  const ChannelShuffleDescriptor& descriptor,
812  Optional<std::string&> reasonIfUnsupported) const
813 {
814  FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
815  reasonIfUnsupported,
816  input,
817  output,
818  descriptor);
819 }
820 
821 bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
822  const TensorInfo& input1,
823  const TensorInfo& output,
824  const ComparisonDescriptor& descriptor,
825  Optional<std::string&> reasonIfUnsupported) const
826 {
827  FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
828  reasonIfUnsupported,
829  input0,
830  input1,
831  output,
832  descriptor);
833 }
834 
835 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
836  const TensorInfo& output,
837  const OriginsDescriptor& descriptor,
838  Optional<std::string&> reasonIfUnsupported) const
839 {
840  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
841  {
842  SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
843  return false;
844  }
845 
846  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
847  if(concatInnerAxis < 3) // Width, height, or channels
848  {
849  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
850  reasonIfUnsupported,
851  inputs,
852  output,
853  descriptor);
854  }
855  else if (concatInnerAxis == 3)
856  {
857  // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
858  // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
859  for (auto& input : inputs)
860  {
861  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
862  {
863  SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
864  return false;
865  }
866  }
867  return true; // Sub-tensors support concat along batch
868  }
869  else // > 4 dimensions not supported.
870  {
871  SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
872  return false;
873  }
874 }
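The inner-axis arithmetic above maps the ArmNN concat axis, counted from the outermost dimension, onto an index counted from the innermost dimension. For a 4-D tensor, concatenating over channels in NHWC (axis 3) gives concatInnerAxis = (4 - 3) - 1 = 0 and is validated by the CL concat workload, while concatenating over batch (axis 0) gives (4 - 0) - 1 = 3 and falls back to the sub-tensor path, which additionally requires matching types and quantization parameters.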
875 
876 bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
877  Optional<std::string&> reasonIfUnsupported) const
878 {
879  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
880  reasonIfUnsupported,
881  output);
882 }
883 
884 bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
885  const TensorInfo& output,
886  Optional<std::string&> reasonIfUnsupported) const
887 {
888  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
889  reasonIfUnsupported,
890  input,
891  output);
892 }
893 
894 bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
895  const TensorInfo& output,
896  Optional<std::string&> reasonIfUnsupported) const
897 {
898  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
899  reasonIfUnsupported,
900  input,
901  output);
902 }
903 
904 bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
905  const TensorInfo& output,
906  const Convolution2dDescriptor& descriptor,
907  const TensorInfo& weights,
908  const Optional<TensorInfo>& biases,
909  Optional<std::string&> reasonIfUnsupported) const
910 {
911  bool isFastMathEnabled = false;
912 #if defined(ARMCOMPUTECL_ENABLED)
913  if (m_ModelContextPtr)
914  {
915  if (m_ModelContextPtr.get() != nullptr)
916  {
917  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
918  if (modelOptions)
919  {
920  isFastMathEnabled = modelOptions->IsFastMathEnabled();
921  }
922  }
923  }
924 #endif
925 
926  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
927  reasonIfUnsupported,
928  input,
929  output,
930  descriptor,
931  weights,
932  biases,
933  isFastMathEnabled,
934  nullptr);
935 }
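The fast-math flag is not part of the Convolution2dDescriptor; it is read from the CL backend's ModelOptions. A sketch of how an application typically requests it when optimizing a network; "GpuAcc" and "FastMathEnabled" are the CL backend's documented option names, and the OptimizerOptionsOpaque usage is assumed here rather than taken from this file:

    armnn::BackendOptions clOptions("GpuAcc", {{"FastMathEnabled", true}});
    armnn::OptimizerOptionsOpaque optimizerOptions;
    optimizerOptions.AddModelOption(clOptions);
    // optimizerOptions is then passed to armnn::Optimize(...); the backend surfaces
    // the option to this check through ClBackendModelContext::IsFastMathEnabled().
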
936 
937 bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
938  const TensorInfo& output,
939  const Convolution3dDescriptor& descriptor,
940  const TensorInfo& weights,
941  const Optional<TensorInfo>& biases,
942  Optional<std::string&> reasonIfUnsupported) const
943 {
944  bool isFastMathEnabled = false;
945 #if defined(ARMCOMPUTECL_ENABLED)
946  if (m_ModelContextPtr)
947 {
948  if (m_ModelContextPtr.get() != nullptr)
949  {
950  auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
951  if (modelOptions)
952  {
953  isFastMathEnabled = modelOptions->IsFastMathEnabled();
954  }
955  }
956 }
957 #endif
958 
959  FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
960  reasonIfUnsupported,
961  input,
962  output,
963  descriptor,
964  weights,
965  biases,
966  isFastMathEnabled,
967  nullptr);
968 }
969 
970 bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
971  const TensorInfo& output,
972  Optional<std::string&> reasonIfUnsupported) const
973 {
974  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
975  reasonIfUnsupported,
976  input,
977  output);
978 }
979 
980 bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
981  const TensorInfo& output,
982  const DepthToSpaceDescriptor& descriptor,
983  Optional<std::string&> reasonIfUnsupported) const
984 {
985  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
986  reasonIfUnsupported,
987  input,
988  output,
989  descriptor);
990 }
991 
992 bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
993  const TensorInfo& output,
994  const DepthwiseConvolution2dDescriptor& descriptor,
995  const TensorInfo& weights,
996  const Optional<TensorInfo>& biases,
997  Optional<std::string&> reasonIfUnsupported) const
998 {
999  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
1000  reasonIfUnsupported,
1001  input,
1002  output,
1003  descriptor,
1004  weights,
1005  biases,
1006  nullptr);
1007 }
1008 
1009 bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
1010  const TensorInfo& output,
1011  const DepthwiseConvolution2dDescriptor& descriptor,
1012  const TensorInfo& weights,
1013  const Optional<TensorInfo>& biases,
1014  Optional<std::string&> reasonIfUnsupported) const
1015 {
1016  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
1017  reasonIfUnsupported,
1018  input,
1019  output,
1020  descriptor,
1021  weights,
1022  biases,
1023  nullptr);
1024 }
1025 
1026 
1027 bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
1028  const TensorInfo& input1,
1029  const TensorInfo& output,
1030  Optional<std::string&> reasonIfUnsupported) const
1031 {
1032  FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
1033  reasonIfUnsupported,
1034  input0,
1035  input1,
1036  output,
1037  nullptr);
1038 }
1039 
1040 bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
1041  const TensorInfo& output,
1042  const ElementwiseUnaryDescriptor& descriptor,
1043  Optional<std::string&> reasonIfUnsupported) const
1044 {
1045  switch(descriptor.m_Operation)
1046  {
1047  case UnaryOperation::Abs:
1048  FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
1049  reasonIfUnsupported,
1050  input,
1051  output);
1052  case UnaryOperation::Exp:
1053  FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
1054  reasonIfUnsupported,
1055  input,
1056  output);
1057  case UnaryOperation::Log:
1058  FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
1059  reasonIfUnsupported,
1060  input,
1061  output);
1062  case UnaryOperation::LogicalNot:
1063  FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
1064  reasonIfUnsupported,
1065  input,
1066  output);
1067  case UnaryOperation::Neg:
1068  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
1069  reasonIfUnsupported,
1070  input,
1071  output);
1072  case UnaryOperation::Rsqrt:
1073  FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
1074  reasonIfUnsupported,
1075  input,
1076  output);
1077  case UnaryOperation::Sin:
1078  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
1079  reasonIfUnsupported,
1080  input,
1081  output);
1082  case UnaryOperation::Sqrt:
1083  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSqrtWorkloadValidate,
1084  reasonIfUnsupported,
1085  input,
1086  output);
1087  default:
1088  return false;
1089  }
1090 }
1091 
1092 bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
1093  const TensorInfo& output,
1094  const FillDescriptor& descriptor,
1095  Optional<std::string&> reasonIfUnsupported) const
1096 {
1097  armnn::IgnoreUnused(input);
1098  armnn::IgnoreUnused(output);
1099  armnn::IgnoreUnused(descriptor);
1100 
1101  return IsClBackendSupported(reasonIfUnsupported);
1102 }
1103 
1104 bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
1105  const TensorInfo& output,
1106  Optional<std::string&> reasonIfUnsupported) const
1107 {
1108  FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
1109  reasonIfUnsupported,
1110  input,
1111  output);
1112 }
1113 
1114 bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
1115  const TensorInfo& output,
1116  const TensorInfo& weights,
1117  const TensorInfo& biases,
1118  const FullyConnectedDescriptor& descriptor,
1119  Optional<std::string&> reasonIfUnsupported) const
1120 {
1121  FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
1122  reasonIfUnsupported,
1123  input,
1124  output,
1125  weights,
1126  biases,
1127  descriptor,
1128  nullptr);
1129 }
1130 
1131 bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
1132  const TensorInfo& input1,
1133  const TensorInfo& output,
1134  const GatherDescriptor& descriptor,
1135  Optional<std::string&> reasonIfUnsupported) const
1136 {
1137  FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
1138  reasonIfUnsupported,
1139  input0,
1140  input1,
1141  output,
1142  descriptor);
1143 }
1144 
1145 bool ClLayerSupport::IsGatherNdSupported(const TensorInfo& input0,
1146  const TensorInfo& input1,
1147  const TensorInfo& output,
1148  Optional<std::string&> reasonIfUnsupported) const
1149 {
1150  FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherNdWorkloadValidate,
1151  reasonIfUnsupported,
1152  input0,
1153  input1,
1154  output);
1155 }
1156 
1157 bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
1158  Optional<std::string&> reasonIfUnsupported) const
1159 {
1160  return IsClBackendSupported(reasonIfUnsupported, input);
1161 }
1162 
1163 bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
1164  const TensorInfo& output,
1165  const InstanceNormalizationDescriptor& descriptor,
1166  Optional<std::string&> reasonIfUnsupported) const
1167 {
1168  FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
1169  reasonIfUnsupported,
1170  input,
1171  output,
1172  descriptor);
1173 }
1174 
1175 bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
1176  const TensorInfo& output,
1177  const L2NormalizationDescriptor& descriptor,
1178  Optional<std::string&> reasonIfUnsupported) const
1179 {
1180  FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
1181  reasonIfUnsupported,
1182  input,
1183  output,
1184  descriptor);
1185 }
1186 
1187 bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
1188  const TensorInfo& input1,
1189  const TensorInfo& output,
1190  const LogicalBinaryDescriptor& descriptor,
1191  Optional<std::string&> reasonIfUnsupported) const
1192 {
1193  IgnoreUnused(output);
1194 
1195  switch(descriptor.m_Operation)
1196  {
1197  case LogicalBinaryOperation::LogicalAnd:
1198  FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
1199  reasonIfUnsupported,
1200  input0,
1201  input1,
1202  output);
1203  case LogicalBinaryOperation::LogicalOr:
1204  FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
1205  reasonIfUnsupported,
1206  input0,
1207  input1,
1208  output);
1209  default:
1210  return false;
1211  }
1212 }
1213 
1214 
1215 bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1216  const TensorInfo& output,
1217  const LogSoftmaxDescriptor& descriptor,
1218  Optional<std::string&> reasonIfUnsupported) const
1219 {
1220  FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
1221  reasonIfUnsupported,
1222  input,
1223  output,
1224  descriptor);
1225 }
1226 
1227 bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
1228  const TensorInfo& outputStateIn,
1229  const TensorInfo& cellStateIn,
1230  const TensorInfo& scratchBuffer,
1231  const TensorInfo& outputStateOut,
1232  const TensorInfo& cellStateOut,
1233  const TensorInfo& output,
1234  const LstmDescriptor& descriptor,
1235  const LstmInputParamsInfo& paramsInfo,
1236  Optional<std::string&> reasonIfUnsupported) const
1237 {
1238  FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
1239  reasonIfUnsupported,
1240  input,
1241  outputStateIn,
1242  cellStateIn,
1243  scratchBuffer,
1244  outputStateOut,
1245  cellStateOut,
1246  output,
1247  descriptor,
1248  paramsInfo);
1249 }
1250 
1251 bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
1252  const TensorInfo& input1,
1253  const TensorInfo& output,
1254  Optional<std::string&> reasonIfUnsupported) const
1255 {
1256  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
1257  reasonIfUnsupported,
1258  input0,
1259  input1,
1260  output);
1261 }
1262 
1263 bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
1264  const TensorInfo& output,
1265  const MeanDescriptor& descriptor,
1266  Optional<std::string&> reasonIfUnsupported) const
1267 {
1268  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
1269  reasonIfUnsupported,
1270  input,
1271  output,
1272  descriptor);
1273 }
1274 
1275 bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
1276  const TensorInfo& input1,
1277  const TensorInfo& output,
1278  Optional<std::string&> reasonIfUnsupported) const
1279 {
1280  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
1281  reasonIfUnsupported,
1282  input0,
1283  input1,
1284  output);
1285 }
1286 
1287 bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
1288  const TensorInfo& input1,
1289  const TensorInfo& output,
1290  Optional<std::string&> reasonIfUnsupported) const
1291 {
1292  FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
1293  reasonIfUnsupported,
1294  input0,
1295  input1,
1296  output,
1297  nullptr);
1298 }
1299 
1300 bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
1301  const TensorInfo& output,
1302  const NormalizationDescriptor& descriptor,
1303  Optional<std::string&> reasonIfUnsupported) const
1304 {
1305  FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1306 }
1307 
1308 bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
1309  Optional<std::string&> reasonIfUnsupported) const
1310 {
1311  return IsClBackendSupported(reasonIfUnsupported, output);
1312 }
1313 
1314 bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
1315  const TensorInfo& output,
1316  const PadDescriptor& descriptor,
1317  Optional<std::string&> reasonIfUnsupported) const
1318 {
1319  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
1320  reasonIfUnsupported,
1321  input,
1322  output,
1323  descriptor);
1324 }
1325 
1326 bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
1327  const TensorInfo& output,
1328  const PermuteDescriptor& descriptor,
1329  Optional<std::string&> reasonIfUnsupported) const
1330 {
1331  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1332 }
1333 
1334 bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
1335  const TensorInfo& output,
1336  const Pooling2dDescriptor& descriptor,
1337  Optional<std::string&> reasonIfUnsupported) const
1338 {
1339  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1340 }
1341 
1342 bool ClLayerSupport::IsPooling3dSupported(const TensorInfo& input,
1343  const TensorInfo& output,
1344  const Pooling3dDescriptor& descriptor,
1345  Optional<std::string&> reasonIfUnsupported) const
1346 {
1347  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1348 }
1349 
1350 bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
1351  const armnn::TensorInfo &alpha,
1352  const armnn::TensorInfo &output,
1353  armnn::Optional<std::string &> reasonIfUnsupported) const
1354 {
1355  FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1356 }
1357 
1358 bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
1359  const TensorInfo& previousOutputIn,
1360  const TensorInfo& previousCellStateIn,
1361  const TensorInfo& outputStateOut,
1362  const TensorInfo& cellStateOut,
1363  const TensorInfo& output,
1364  const QLstmDescriptor& descriptor,
1365  const LstmInputParamsInfo& paramsInfo,
1366  Optional<std::string&> reasonIfUnsupported) const
1367 {
1368  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1369  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1370  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1371  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1372  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1373  output.GetDataType() == armnn::DataType::QAsymmS8)
1374  {
1375  FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
1376  reasonIfUnsupported,
1377  input,
1378  previousCellStateIn,
1379  previousOutputIn,
1380  cellStateOut,
1381  outputStateOut,
1382  output,
1383  descriptor,
1384  paramsInfo);
1385  }
1386  else
1387  {
1388  return false;
1389  }
1390 }
1391 
1392 bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
1393  const TensorInfo& previousCellStateIn,
1394  const TensorInfo& previousOutputIn,
1395  const TensorInfo& cellStateOut,
1396  const TensorInfo& output,
1397  const QuantizedLstmInputParamsInfo& paramsInfo,
1398  Optional<std::string&> reasonIfUnsupported) const
1399 {
1400  FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
1401  reasonIfUnsupported,
1402  input,
1403  previousCellStateIn,
1404  previousOutputIn,
1405  cellStateOut,
1406  output,
1407  paramsInfo);
1408 }
1409 
1410 bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
1411  const TensorInfo& output,
1412  Optional<std::string&> reasonIfUnsupported) const
1413 {
1414  FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
1415  reasonIfUnsupported,
1416  input,
1417  output);
1418 }
1419 
1420 bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
1421  const TensorInfo& output,
1422  const ReduceDescriptor& descriptor,
1423  Optional<std::string&> reasonIfUnsupported) const
1424 {
1425  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
1426  reasonIfUnsupported,
1427  input,
1428  output,
1429  descriptor);
1430 }
1431 
1432 bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
1433  const TensorInfo& output,
1434  const ReshapeDescriptor& descriptor,
1435  Optional<std::string&> reasonIfUnsupported) const
1436 {
1437  IgnoreUnused(descriptor);
1438  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
1439 }
1440 
1441 bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
1442  const TensorInfo& output,
1443  const ResizeDescriptor& descriptor,
1444  Optional<std::string&> reasonIfUnsupported) const
1445 {
1446  FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1447 }
1448 
1449 bool ClLayerSupport::IsReverseV2Supported(const TensorInfo& input,
1450  const TensorInfo& axis,
1451  const TensorInfo& output,
1452  Optional<std::string&> reasonIfUnsupported) const
1453 {
1454  FORWARD_WORKLOAD_VALIDATE_FUNC(ClReverseV2WorkloadValidate,
1455  reasonIfUnsupported,
1456  input,
1457  axis,
1458  output);
1459 }
1460 
1461 bool ClLayerSupport::IsScatterNdSupported(const TensorInfo& input,
1462  const TensorInfo& indices,
1463  const TensorInfo& updates,
1464  const TensorInfo& output,
1465  const ScatterNdDescriptor& descriptor,
1466  Optional<std::string&> reasonIfUnsupported) const
1467 {
1468  FORWARD_WORKLOAD_VALIDATE_FUNC(ClScatterNdWorkloadValidate,
1469  reasonIfUnsupported,
1470  input,
1471  indices,
1472  updates,
1473  output,
1474  descriptor);
1475 }
1476 
1477 bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
1478  const TensorInfo& output,
1479  const SliceDescriptor& descriptor,
1480  Optional<std::string&> reasonIfUnsupported) const
1481 {
1482  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1483 }
1484 
1485 bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
1486  const TensorInfo& output,
1487  const SoftmaxDescriptor& descriptor,
1488  Optional<std::string&> reasonIfUnsupported) const
1489 {
1490  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1491 }
1492 
1493 bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
1494  const TensorInfo& output,
1495  const SpaceToBatchNdDescriptor& descriptor,
1496  Optional<std::string&> reasonIfUnsupported) const
1497 {
1498  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
1499  reasonIfUnsupported,
1500  input,
1501  output,
1502  descriptor);
1503 }
1504 
1505 bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
1506  const TensorInfo& output,
1507  const SpaceToDepthDescriptor& descriptor,
1508  Optional<std::string&> reasonIfUnsupported) const
1509 {
1510  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
1511  reasonIfUnsupported,
1512  input,
1513  output,
1514  descriptor);
1515 }
1516 
1517 bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
1518  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1519  const ViewsDescriptor& descriptor,
1520  Optional<std::string&> reasonIfUnsupported) const
1521 {
1522 #if defined(ARMCOMPUTECL_ENABLED)
1523  // Split along the last dimension, cannot use sub-tensors
1524  // as width and height of the sub-tensors do not match
1525  // the width and height of the parent tensor
1526  // in case of input with more than 2D.
1527  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1528  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1529  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1530  {
1531  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
1532  reasonIfUnsupported,
1533  input,
1534  outputs,
1535  *splitAxis.begin());
1536  }
1537 #endif
1538  IgnoreUnused(descriptor);
1539  for (auto output : outputs)
1540  {
1541  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1542  {
1543  SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
1544  return false;
1545  }
1546  }
1547  return true;
1548 }
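In the CL-enabled build, a split is handed to ClSplitterWorkloadValidate only when the tensor has more than two dimensions and is split along its last (innermost) dimension; every other split relies on sub-tensor views, which is why the fallback loop only checks that each output matches the input's type and quantization space. For example, splitting a 4-D tensor along axis 3 takes the validated path, while splitting along axis 1 takes the sub-tensor path.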
1549 
1550 bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1551  const TensorInfo& output,
1552  const StackDescriptor& descriptor,
1553  Optional<std::string&> reasonIfUnsupported) const
1554 {
1555  FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
1556  reasonIfUnsupported,
1557  inputs,
1558  output,
1559  descriptor);
1560 }
1561 
1562 bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
1563  const TensorInfo& output,
1564  const StridedSliceDescriptor& descriptor,
1565  Optional<std::string&> reasonIfUnsupported) const
1566 {
1567  FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
1568  reasonIfUnsupported,
1569  input,
1570  output,
1571  descriptor);
1572 }
1573 
1574 bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
1575  const TensorInfo& input1,
1576  const TensorInfo& output,
1577  Optional<std::string&> reasonIfUnsupported) const
1578 {
1579  FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
1580  reasonIfUnsupported,
1581  input0,
1582  input1,
1583  output,
1584  nullptr);
1585 }
1586 
1587 bool ClLayerSupport::IsTileSupported(const TensorInfo& input,
1588  const TensorInfo& output,
1589  const TileDescriptor& descriptor,
1590  Optional<std::string&> reasonIfUnsupported) const
1591 {
1592  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTileWorkloadValidate,
1593  reasonIfUnsupported,
1594  input,
1595  output,
1596  descriptor);
1597 }
1598 
1599 bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
1600  const TensorInfo& output,
1601  const TransposeConvolution2dDescriptor& descriptor,
1602  const TensorInfo& weights,
1603  const Optional<TensorInfo>& biases,
1604  Optional<std::string&> reasonIfUnsupported) const
1605 {
1606  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
1607  reasonIfUnsupported,
1608  input,
1609  output,
1610  descriptor,
1611  weights,
1612  biases);
1613 }
1614 
1615 bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
1616  const TensorInfo& output,
1617  const TransposeDescriptor& descriptor,
1618  Optional<std::string&> reasonIfUnsupported) const
1619 {
1620  FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1621 }
1622 
1623 bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
1624  const TensorInfo& outputStateIn,
1625  const TensorInfo& cellStateIn,
1626  const TensorInfo& outputStateOut,
1627  const TensorInfo& cellStateOut,
1628  const TensorInfo& output,
1629  const UnidirectionalSequenceLstmDescriptor& descriptor,
1630  const LstmInputParamsInfo& paramsInfo,
1631  Optional<std::string&> reasonIfUnsupported) const
1632 {
1633  FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate,
1634  reasonIfUnsupported,
1635  input,
1636  outputStateIn,
1637  cellStateIn,
1638  outputStateOut,
1639  cellStateOut,
1640  output,
1641  descriptor,
1642  paramsInfo);
1643 }
1644 
1645 } // namespace armnn
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::ClLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1334
ClPooling3dWorkload.hpp
armnn::ClLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:766
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::ClLogWorkloadValidate
arm_compute::Status ClLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogWorkload.cpp:18
armnn::BinaryOperation::Mul
@ Mul
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::ClRsqrtWorkloadValidate
arm_compute::Status ClRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClRsqrtWorkload.cpp:18
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
ClConstantWorkload.hpp
armnn::ClLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1275
ClDepthwiseConvolutionWorkload.hpp
armnn::LayerType::Splitter
@ Splitter
ClArgMinMaxWorkload.hpp
armnn::ClFloorWorkloadValidate
arm_compute::Status ClFloorWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClFloorFloatWorkload.cpp:14
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
armnn::ClLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1350
armnn::ClSubtractionValidate
arm_compute::Status ClSubtractionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClSubtractionWorkload.cpp:46
armnn::Optional
Definition: Optional.hpp:270
armnn::ClLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:904
armnn::ClLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1227
ClLogWorkload.hpp
armnn::ClLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:835
armnn::ClQuantizeWorkloadValidate
arm_compute::Status ClQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClQuantizeWorkload.cpp:22
ClElementwiseBinaryWorkload.hpp
armnn::ClTransposeConvolution2dWorkloadValidate
arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: ClTransposeConvolution2dWorkload.cpp:26
ClLogSoftmaxWorkload.hpp
armnn::ClCastValidate
arm_compute::Status ClCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClCastWorkload.cpp:20
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: ClLayerSupport.cpp:154
ClDepthToSpaceWorkload.hpp
armnn::ClStridedSliceWorkloadValidate
arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: ClStridedSliceWorkload.cpp:27
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
WorkloadUtils.hpp
armnn::ClExpWorkloadValidate
arm_compute::Status ClExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClExpWorkload.cpp:18
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::ClMaximumWorkloadValidate
arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMaximumWorkload.cpp:24
armnn::ClLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:725
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::ClLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:751
armnn::ClMultiplicationWorkloadValidate
arm_compute::Status ClMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClMultiplicationWorkload.cpp:18
ClQuantizedLstmWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::ClGatherNdWorkloadValidate
arm_compute::Status ClGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: ClGatherNdWorkload.cpp:16
armnn::ClSpaceToDepthWorkloadValidate
arm_compute::Status ClSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: ClSpaceToDepthWorkload.cpp:54
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::ClLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1251
armnn::ClSplitterWorkloadValidate
arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: ClSplitterWorkload.cpp:32
armnn::ClFloorDivWorkloadValidate
arm_compute::Status ClFloorDivWorkloadValidate(const TensorInfo &input0Info, const TensorInfo &input1Info, const TensorInfo &outputInfo, const ActivationDescriptor *activationDescriptor)
Definition: ClFloorDivWorkload.cpp:55
armnn::ClLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:713
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
ClUnidirectionalSequenceLstmFloatWorkload.hpp
ClNormalizationFloatWorkload.hpp
armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status ClUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClUnidirectionalSequenceLstmFloatWorkload.cpp:508
armnn::ClBatchToSpaceNdWorkloadValidate
arm_compute::Status ClBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: ClBatchToSpaceNdWorkload.cpp:17
ClLayerSupport.hpp
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BinaryOperation::Sub
@ Sub
ClReverseV2Workload.hpp
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::ClLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1308
armnn::ClLogicalAndWorkloadValidate
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalAndWorkload.cpp:20
armnn::LayerType::Comparison
@ Comparison
armnn::ClLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:992
armnn::ClLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1314
armnn::ClLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1114
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ClLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1187
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
ClPooling2dWorkload.hpp
armnn::ClDepthToSpaceWorkloadValidate
arm_compute::Status ClDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: ClDepthToSpaceWorkload.cpp:22
armnn::ClActivationWorkloadValidate
arm_compute::Status ClActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: ClActivationWorkload.cpp:17
armnn::ClMeanValidate
arm_compute::Status ClMeanValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: ClMeanWorkload.cpp:17
armnn::ClElementwiseBinaryValidate
arm_compute::Status ClElementwiseBinaryValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClElementwiseBinaryWorkload.cpp:64
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
ClReduceWorkload.hpp
ClMeanWorkload.hpp
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
ClSinWorkload.hpp
armnn::ClChannelShuffleValidate
arm_compute::Status ClChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: ClChannelShuffleWorkload.cpp:20
armnn::LayerType::Tile
@ Tile
ClResizeWorkload.hpp
ClComparisonWorkload.hpp
ClPreluWorkload.hpp
armnn::ClLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1300
armnn::ClLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1420
armnn::LayerType::Stack
@ Stack
BackendRegistry.hpp
armnn::ClFullyConnectedWorkloadValidate
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClFullyConnectedWorkload.cpp:19
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
IgnoreUnused.hpp
armnn::ClConvolution2dWorkloadValidate
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution2dWorkload.cpp:23
ClCastWorkload.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::ClLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1477
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Reduce
@ Reduce
armnn::ClLayerSupport::ClLayerSupport
ClLayerSupport()
Definition: ClLayerSupport.cpp:185
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ClLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1485
ClBatchToSpaceNdWorkload.hpp
armnn::LayerType::GatherNd
@ GatherNd
armnn::ClLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1263
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::ClL2NormalizationWorkloadValidate
arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: ClL2NormalizationFloatWorkload.cpp:17
armnn::ClLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:809
armnn::ClLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:884
armnn::ClLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1104
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
ClDivisionWorkload.hpp
ClGatherNdWorkload.hpp
armnn::ClAdditionValidate
arm_compute::Status ClAdditionValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClAdditionWorkload.cpp:45
armnn::ClNegWorkloadValidate
arm_compute::Status ClNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClNegWorkload.cpp:18
ClBackendModelContext.hpp
LayerSupportCommon.hpp
ClGatherWorkload.hpp
armnn::ClLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1410
armnn::ClQLstmWorkloadValidate
arm_compute::Status ClQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClQLstmWorkload.cpp:247
armnn::ClReduceWorkloadValidate
arm_compute::Status ClReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: ClReduceWorkload.cpp:18
armnn::LayerType::Slice
@ Slice
ClExpWorkload.hpp
armnn::ClLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1287
armnn::ClBackendModelContext
The ClBackendModelContext is used to pass in CL specific backend ModelOptions.
Definition: ClBackendModelContext.hpp:28
armnn::ClLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1599
ClInstanceNormalizationWorkload.hpp
armnn::ClConstantWorkloadValidate
arm_compute::Status ClConstantWorkloadValidate(const TensorInfo &output)
Definition: ClConstantWorkload.cpp:18
armnn::BinaryOperation::Maximum
@ Maximum
armnn::ClPermuteWorkloadValidate
arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: ClPermuteWorkload.cpp:17
armnn::ClLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1163
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
ClActivationWorkload.hpp
ClTransposeConvolution2dWorkload.hpp
ClDequantizeWorkload.hpp
armnn::BinaryOperation::SqDiff
@ SqDiff
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::ClLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1505
armnn::ClLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1432
ClSpaceToBatchNdWorkload.hpp
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::LayerType::ScatterNd
@ ScatterNd
armnn::ClLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const
Default implementation of the ILayerSupport interface; backends should implement this as a switch sta...
Definition: ClLayerSupport.cpp:190
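As a rough usage sketch (header path, tensor shape, and data type are illustrative assumptions, not taken from this file), the switch documented above can be queried directly for a single layer:

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <cl/ClLayerSupport.hpp>   // backend-internal header; path assumed
#include <string>

bool CanRunReluOnCl()
{
    using namespace armnn;
    ClLayerSupport support;                                   // default-constructed, no ModelOptions
    TensorInfo info(TensorShape({1, 8}), DataType::Float32);  // illustrative shape/type
    ActivationDescriptor desc;
    desc.m_Function = ActivationFunction::ReLu;

    std::string reason;
    bool supported = support.IsLayerSupported(LayerType::Activation,
                                              {info, info},             // input, output
                                              desc,
                                              EmptyOptional(),          // no LSTM params
                                              EmptyOptional(),          // no quantized LSTM params
                                              Optional<std::string&>(reason));
    return supported;   // 'reason' is only filled in when the answer is false
}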
armnn::ClResizeWorkloadValidate
arm_compute::Status ClResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: ClResizeWorkload.cpp:22
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1534
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::UnaryOperation::Exp
@ Exp
armnn::ClPooling3dWorkloadValidate
arm_compute::Status ClPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: ClPooling3dWorkload.cpp:18
armnn::ClStackWorkloadValidate
arm_compute::Status ClStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: ClStackWorkload.cpp:29
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
ClSliceWorkload.hpp
ClTransposeWorkload.hpp
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an O...
Definition: Optional.hpp:32
ClStridedSliceWorkload.hpp
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Softmax
@ Softmax
ClL2NormalizationFloatWorkload.hpp
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for built-in pointers only.
Definition: PolymorphicDowncast.hpp:74
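A small sketch of the typical pattern (the surrounding function is hypothetical): a BaseDescriptor reference received through the generic interface is narrowed to the concrete descriptor type before its fields are read.

#include <armnn/Descriptors.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

float GetSoftmaxBeta(const armnn::BaseDescriptor& descriptor)
{
    // Operates on plain pointers; the cast is asserted against dynamic_cast in debug builds.
    auto softmaxDesc = armnn::PolymorphicDowncast<const armnn::SoftmaxDescriptor*>(&descriptor);
    return softmaxDesc->m_Beta;
}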
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantized, that the quantization parameters are the same.
Definition: Tensor.cpp:432
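Illustrative values (not from this file) showing what the check compares:

#include <armnn/Tensor.hpp>

bool QuantisationMatches()
{
    using namespace armnn;
    // Same data type, same scale and offset, so the type spaces match.
    TensorInfo a(TensorShape({1, 4}), DataType::QAsymmU8, /*scale*/ 0.25f, /*offset*/ 10);
    TensorInfo b(TensorShape({1, 4}), DataType::QAsymmU8, /*scale*/ 0.25f, /*offset*/ 10);
    return a.IsTypeSpaceMatch(b);   // true; a differing scale or offset would give false
}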
armnn::ClLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1623
armnn::ClComparisonWorkloadValidate
arm_compute::Status ClComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: ClComparisonWorkload.cpp:24
armnn::ClTileWorkloadValidate
arm_compute::Status ClTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: ClTileWorkload.cpp:16
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::ClLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:821
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Quantize
@ Quantize
ClConcatWorkload.hpp
armnn::ClLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1342
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
ClConvertFp16ToFp32Workload.hpp
ClFullyConnectedWorkload.hpp
armnn::ClConvertFp32ToFp16WorkloadValidate
arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp32ToFp16Workload.cpp:44
armnn::ClTransposeWorkloadValidate
arm_compute::Status ClTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: ClTransposeWorkload.cpp:17
armnn::ClScatterNdWorkloadValidate
arm_compute::Status ClScatterNdWorkloadValidate(const TensorInfo &inputInfo, const TensorInfo &indicesInfo, const TensorInfo &updatesInfo, const TensorInfo &outputInfo, const ScatterNdDescriptor &descriptor)
Definition: ClScatterNdWorkload.cpp:20
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
ClSpaceToDepthWorkload.hpp
armnn::ClSoftmaxWorkloadValidate
arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: ClSoftmaxWorkload.cpp:17
armnn::ClReverseV2WorkloadValidate
arm_compute::Status ClReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)
Definition: ClReverseV2Workload.cpp:16
armnn::LayerType::Addition
@ Addition
armnn::ClPadValidate
arm_compute::Status ClPadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: ClPadWorkload.cpp:62
ClNegWorkload.hpp
armnn::ClLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1615
armnn::ClLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1587
ArmComputeUtils.hpp
armnn::ClInstanceNormalizationWorkloadValidate
arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: ClInstanceNormalizationWorkload.cpp:18
armnn::ClGatherWorkloadValidate
arm_compute::Status ClGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: ClGatherWorkload.cpp:15
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
ClLogicalNotWorkload.hpp
ClLstmFloatWorkload.hpp
armnn::ClSliceWorkloadValidate
arm_compute::Status ClSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: ClSliceWorkload.cpp:18
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::ClBatchMatMulValidate
arm_compute::Status ClBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchMatMulWorkload.cpp:24
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::ClLstmFloatWorkloadValidate
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: ClLstmFloatWorkload.cpp:244
armnn::BinaryOperation::Power
@ Power
armnn::ClLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1493
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
ClQuantizeWorkload.hpp
armnn::ClLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1175
armnn::ClLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:799
ClBatchNormalizationFloatWorkload.hpp
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::ClSqrtWorkloadValidate
arm_compute::Status ClSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSqrtWorkload.cpp:19
armnn::LayerType::Division
@ Division
armnn::LayerType::Shape
@ Shape
armnn::ClNormalizationWorkloadValidate
arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: ClNormalizationFloatWorkload.cpp:19
armnn::ClLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1027
ClLogicalAndWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ClLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1157
armnn::ClLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:980
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
ClPadWorkload.hpp
ClConvolution2dWorkload.hpp
ClRsqrtWorkload.hpp
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::ClLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:738
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::ClLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:787
ClConvertFp32ToFp16Workload.hpp
ClBackendId.hpp
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ClLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1040
ClAdditionWorkload.hpp
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
ClConvolution3dWorkload.hpp
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::ClLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1441
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:131
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::ClPooling2dWorkloadValidate
arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: ClPooling2dWorkload.cpp:18
armnn::ClLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1092
armnn::ClLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:937
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::ClMinimumWorkloadValidate
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMinimumWorkload.cpp:24
armnn::ClSpaceToBatchNdWorkloadValidate
arm_compute::Status ClSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: ClSpaceToBatchNdWorkload.cpp:16
armnn::Status
Status
Definition: Types.hpp:42
armnn::BinaryOperation::FloorDiv
@ FloorDiv
armnn::ClLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1392
ClMinimumWorkload.hpp
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::ClLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1574
ClStackWorkload.hpp
armnn::LayerType::Reshape
@ Reshape
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::ClLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1326
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::ClConvolution3dWorkloadValidate
arm_compute::Status ClConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: ClConvolution3dWorkload.cpp:23
armnn::ClLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1145
armnn::LayerType::Fill
@ Fill
armnn::ClBatchNormalizationValidate
arm_compute::Status ClBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchNormalizationFloatWorkload.cpp:19
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:307
armnn::ClQuantizedLstmWorkloadValidate
arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: ClQuantizedLstmWorkload.cpp:18
armnn::ClLogSoftmaxWorkloadValidate
arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: ClLogSoftmaxWorkload.cpp:17
armnn::LayerType::Minimum
@ Minimum
ClSplitterWorkload.hpp
ClFloorDivWorkload.hpp
armnn::ClDivisionWorkloadValidate
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: ClDivisionWorkload.cpp:18
ClBatchMatMulWorkload.hpp
armnn::ClConcatWorkloadValidate
arm_compute::Status ClConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: ClConcatWorkload.cpp:27
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
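A sketch of how this generic dispatcher can be used on its own (the predicate choice is illustrative); TrueFunc and FalseFunc are the ready-made predicates from the same header:

#include <LayerSupportCommon.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Types.hpp>
#include <string>

bool SupportsFloatTypesOnly(armnn::DataType type, armnn::Optional<std::string&> reason)
{
    using namespace armnn;
    return IsSupportedForDataTypeGeneric(reason,
                                         type,
                                         &TrueFunc<>,    // Float16
                                         &TrueFunc<>,    // Float32
                                         &FalseFunc<>,   // Uint8
                                         &FalseFunc<>,   // Int32
                                         &FalseFunc<>);  // Boolean
}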
armnn::ClLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1517
ClMaximumWorkload.hpp
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::BinaryOperation::Minimum
@ Minimum
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
ClSubtractionWorkload.hpp
armnn::ClLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:970
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::ClPreluWorkloadValidate
arm_compute::Status ClPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: ClPreluWorkload.cpp:16
armnn::ClLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1131
armnn::LayerType::Pad
@ Pad
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for the split operation.
Definition: WorkloadUtils.cpp:377
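A hedged sketch (header path and shapes are assumptions): for a [2,4] input split into two [2,2] views with no explicit axis set, the only dimension whose view size differs from the input is dimension 1, so that is the axis returned.

#include <armnn/Descriptors.hpp>
#include <backendsCommon/WorkloadUtils.hpp>   // backend-internal header; path assumed
#include <set>

std::set<unsigned int> SplitAxisForTwoWaySplit()
{
    using namespace armnn;
    SplitterDescriptor desc(/*numViews*/ 2, /*numDimensions*/ 2);
    for (uint32_t view = 0; view < 2; ++view)
    {
        desc.SetViewOriginCoord(view, 0, 0);
        desc.SetViewOriginCoord(view, 1, view * 2);
        desc.SetViewSize(view, 0, 2);   // matches input dimension 0
        desc.SetViewSize(view, 1, 2);   // half of input dimension 1, which is 4
    }
    return ComputeSplitAxis(desc, TensorShape({2, 4}));   // expected: {1}
}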
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::ClSinWorkloadValidate
arm_compute::Status ClSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClSinWorkload.cpp:18
armnn::LayerType::Rank
@ Rank
armnn::ClAbsWorkloadValidate
arm_compute::Status ClAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClAbsWorkload.cpp:19
armnn::LayerType::Mean
@ Mean
ArmComputeTensorUtils.hpp
armnn::UnaryOperation::Abs
@ Abs
InternalTypes.hpp
armnn::ClLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1562
ClMultiplicationWorkload.hpp
armnn::ClLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1550
ClAbsWorkload.hpp
ClTileWorkload.hpp
armnn::LayerType::Input
@ Input
ClReshapeWorkload.hpp
armnn::ClConvertFp16ToFp32WorkloadValidate
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClConvertFp16ToFp32Workload.cpp:44
armnn::LayerType::Resize
@ Resize
armnn::ClLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1215
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
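A short sketch of the intended use (the function below is hypothetical): the helper writes the message only when the caller actually supplied a reference, so passing EmptyOptional() is safe.

#include <LayerSupportCommon.hpp>
#include <armnn/Optional.hpp>
#include <string>

bool RejectWithReason(armnn::Optional<std::string&> reasonIfUnsupported)
{
    armnn::SetValueChecked(reasonIfUnsupported, "Only Float32 inputs are supported");   // illustrative message
    return false;
}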
ClSoftmaxWorkload.hpp
ClFillWorkload.hpp
ClFloorFloatWorkload.hpp
armnn::BinaryOperation::Div
@ Div
armnn::ClLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:876
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::ClLogicalOrWorkloadValidate
arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClLogicalOrWorkload.cpp:20
armnn::ClDequantizeWorkloadValidate
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClDequantizeWorkload.cpp:22
armnn::LayerType::Lstm
@ Lstm
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:98
ClLogicalOrWorkload.hpp
armnn::LayerType::Dequantize
@ Dequantize
armnn::ScatterNdDescriptor
A ScatterNdDescriptor for the ScatterNdLayer.
Definition: Descriptors.hpp:1679
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::ClReshapeWorkloadValidate
arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClReshapeWorkload.cpp:15
armnn::LayerType
LayerType
When adding a new layer, also adapt the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:496
armnn::ClLayerSupport::IsScatterNdSupported
bool IsScatterNdSupported(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &updates, const TensorInfo &output, const ScatterNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1461
armnn::ClDepthwiseConvolutionWorkloadValidate
arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: ClDepthwiseConvolutionWorkload.cpp:26
ClScatterNdWorkload.hpp
armnn::LayerType::Unmap
@ Unmap
ClPermuteWorkload.hpp
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::LayerType::QLstm
@ QLstm
ClSqrtWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
armnn::ClArgMinMaxWorkloadValidate
arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: ClArgMinMaxWorkload.cpp:31
ClQLstmWorkload.hpp
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::ClLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: ClLayerSupport.cpp:1449
armnn::ClLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1009
armnn::ClLogicalNotWorkloadValidate
arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: ClLogicalNotWorkload.cpp:20
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::ClLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:894
armnn::ClBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: ClBackendModelContext.cpp:66
armnn::ClLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: ClLayerSupport.cpp:1358
ClChannelShuffleWorkload.hpp