ArmNN
 24.08
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
8 
9 #include <armnn/Exceptions.hpp>
10 #include <armnn/Tensor.hpp>
11 #include <armnn/Types.hpp>
13 
14 #include <LayerSupportCommon.hpp>
18 
19 #if defined(ARMCOMPUTENEON_ENABLED)
91 #endif
92 
93 namespace armnn
94 {
95 
96 namespace
97 {
98 
99 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
100 {
101  if (!type)
102  {
103  return info;
104  }
105  if (info.HasMultipleQuantizationScales())
106  {
107  return TensorInfo(info.GetShape(),
108  type.value(),
109  info.GetQuantizationScales(),
110  info.GetQuantizationDim().value(),
111  info.IsConstant());
112  }
113  else
114  {
115  return TensorInfo(info.GetShape(),
116  type.value(),
117  info.GetQuantizationScale(),
118  info.GetQuantizationOffset(),
119  info.IsConstant());
120  }
121 }
122 
123 template< typename ... Args>
124 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
125 {
126  IgnoreUnused(reasonIfUnsupported, (args)...);
127 #if defined(ARMCOMPUTENEON_ENABLED)
128  return true;
129 #else
130  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
131  return false;
132 #endif
133 }
134 
135 template<typename FloatFunc, typename Uint8Func, typename ... Params>
136 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
137  DataType dataType,
138  FloatFunc floatFuncPtr,
139  Uint8Func uint8FuncPtr,
140  Params&&... params)
141 {
142  return IsNeonBackendSupported(reasonIfUnsupported) &&
143  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
144  dataType,
145  floatFuncPtr,
146  floatFuncPtr,
147  uint8FuncPtr,
148  &FalseFunc<>,
149  &FalseFunc<>,
150  std::forward<Params>(params)...);
151 }
152 
#if defined(ARMCOMPUTENEON_ENABLED)
// Invokes an Arm Compute Library validate function and maps the resulting
// arm_compute::Status onto ArmNN's bool + reason convention: true when the
// status is OK, otherwise false with the ACL error description copied into
// reasonIfUnsupported (when the caller provided one).
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    const arm_compute::Status status = func(std::forward<Args>(args)...);
    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        return true;
    }
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = status.error_description();
    }
    return false;
}

// Expands to a `return` statement: with NEON compiled in, defer to the ACL
// validate function; without it, report that the backend is unavailable.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
172 } // anonymous namespace
173 
175  : m_ModelContextPtr(modelContextPtr)
176 {
177 }
178 
180  : m_ModelContextPtr(nullptr)
181 {
182 }
183 
185  const std::vector<TensorInfo>& infos,
186  const BaseDescriptor& descriptor,
187  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
188  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
189  Optional<std::string&> reasonIfUnsupported,
190  const NeonLayerSupport& support)
191 {
192  switch (type)
193  {
195  return support.IsActivationSupported(infos[0],
196  infos[1],
197  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
198  reasonIfUnsupported);
199  case LayerType::Addition:
200  return support.IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
202  return support.IsArgMinMaxSupported(infos[0],
203  infos[1],
204  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
205  reasonIfUnsupported);
207  return support.IsBatchMatMulSupported(infos[0],
208  infos[1],
209  infos[2],
210  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
211  reasonIfUnsupported);
213  return support.IsBatchNormalizationSupported(infos[0],
214  infos[1],
215  infos[2],
216  infos[3],
217  infos[4],
218  infos[5],
219  *(PolymorphicDowncast<const
220  BatchNormalizationDescriptor*>(&descriptor)),
221  reasonIfUnsupported);
223  return support.IsBatchToSpaceNdSupported(infos[0],
224  infos[1],
225  *(PolymorphicDowncast<const
226  BatchToSpaceNdDescriptor*>(&descriptor)),
227  reasonIfUnsupported);
228  case LayerType::Cast:
229  return support.IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
231  return support.IsChannelShuffleSupported(infos[0],
232  infos[1],
233  *(PolymorphicDowncast<const
234  ChannelShuffleDescriptor*>(&descriptor)),
235  reasonIfUnsupported);
237  return support.IsComparisonSupported(infos[0],
238  infos[1],
239  infos[2],
240  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
241  reasonIfUnsupported);
242  case LayerType::Concat:
243  {
244  std::vector<const TensorInfo*> inputInfos;
245  for (uint32_t i = 0; i < (infos.size() - 1); i++)
246  {
247  inputInfos.push_back(&infos[i]);
248  }
249  return support.IsConcatSupported(inputInfos,
250  infos[infos.size() - 1],
251  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
252  reasonIfUnsupported);
253  }
254  case LayerType::Constant:
255  return support.IsConstantSupported(infos[0], reasonIfUnsupported);
257  return support.IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
259  return support.IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
261  {
262  if (infos.size() != 4)
263  {
264  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
265  "TensorInfos should be of format: {input, output, weights, biases}.");
266  }
267 
268  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
269  if (infos[3] == TensorInfo())
270  {
271  return support.IsConvolution2dSupported(infos[0],
272  infos[1],
273  desc,
274  infos[2],
275  EmptyOptional(),
276  reasonIfUnsupported);
277  }
278  else
279  {
280  return support.IsConvolution2dSupported(infos[0],
281  infos[1],
282  desc,
283  infos[2],
284  infos[3],
285  reasonIfUnsupported);
286  }
287  }
289  {
290  if (infos.size() != 4)
291  {
292  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
293  "TensorInfos should be of format: {input, output, weights, biases}.");
294  }
295 
296  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
297  if (infos[3] == TensorInfo())
298  {
299  return support.IsConvolution3dSupported(infos[0],
300  infos[1],
301  desc,
302  infos[2],
303  EmptyOptional(),
304  reasonIfUnsupported);
305  }
306  else
307  {
308  return support.IsConvolution3dSupported(infos[0],
309  infos[1],
310  desc,
311  infos[2],
312  infos[3],
313  reasonIfUnsupported);
314  }
315  }
317  return support.IsDepthToSpaceSupported(infos[0],
318  infos[1],
319  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
320  reasonIfUnsupported);
322  {
323  if (infos.size() != 4)
324  {
325  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
326  "TensorInfos should be of format: {input, output, weights, biases}.");
327  }
328 
329  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
330  if (infos[3] == TensorInfo())
331  {
332  return support.IsDepthwiseConvolutionSupported(infos[0],
333  infos[1],
334  desc,
335  infos[2],
336  EmptyOptional(),
337  reasonIfUnsupported);
338  }
339  else
340  {
341  return support.IsDepthwiseConvolutionSupported(infos[0],
342  infos[1],
343  desc,
344  infos[2],
345  infos[3],
346  reasonIfUnsupported);
347  }
348  }
350  return support.IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
352  {
353  auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
354  return support.IsDetectionPostProcessSupported(infos[0],
355  infos[1],
356  infos[2],
357  infos[3],
358  infos[4],
359  infos[5],
360  infos[6],
361  desc,
362  reasonIfUnsupported);
363  }
364  case LayerType::Division:
365  return support.IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
367  {
368  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
369 
370  switch (desc.m_Operation)
371  {
374  reasonIfUnsupported,
375  infos[0],
376  infos[1],
377  infos[2],
378  nullptr);
381  reasonIfUnsupported,
382  infos[0],
383  infos[1],
384  infos[2],
385  nullptr);
388  reasonIfUnsupported,
389  infos[0],
390  infos[1],
391  infos[2]);
394  reasonIfUnsupported,
395  infos[0],
396  infos[1],
397  infos[2]);
400  reasonIfUnsupported,
401  infos[0],
402  infos[1],
403  infos[2],
404  nullptr);
408  reasonIfUnsupported,
409  infos[0],
410  infos[1],
411  infos[2],
412  desc,
413  nullptr);
416  reasonIfUnsupported,
417  infos[0],
418  infos[1],
419  infos[2],
420  nullptr);
421  default:
422  return false;
423  }
424  }
426  return support.IsElementwiseUnarySupported(infos[0],
427  infos[1],
428  *(PolymorphicDowncast<const
429  ElementwiseUnaryDescriptor*>(&descriptor)),
430  reasonIfUnsupported);
431  case LayerType::Fill:
432  return support.IsFillSupported(infos[0],
433  infos[1],
434  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
435  reasonIfUnsupported);
436  case LayerType::Floor:
437  return support.IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
439  return support.IsFullyConnectedSupported(infos[0],
440  infos[1],
441  infos[2],
442  infos[3],
443  *(PolymorphicDowncast<const
444  FullyConnectedDescriptor*>(&descriptor)),
445  reasonIfUnsupported);
446  case LayerType::Fused:
447  {
448  auto fusedDescriptor = *(PolymorphicDowncast<const FusedDescriptor*>(&descriptor));
449  if (fusedDescriptor.m_NumInputSlots + fusedDescriptor.m_NumOutputSlots != infos.size())
450  {
451  throw InvalidArgumentException("Invalid number of FusedLayer TensorInfos.");
452  }
453 
454  auto it = infos.begin() + numeric_cast<TensorInfo::DifferenceType>(fusedDescriptor.m_NumInputSlots);
455  std::vector<TensorInfo> inputInfos(infos.begin(), it);
456  std::vector<TensorInfo> outputInfos(it, infos.end());
457 
458  return support.IsFusedSupported({inputInfos.begin(), inputInfos.end()},
459  {outputInfos.begin(), outputInfos.end()},
460  fusedDescriptor,
461  reasonIfUnsupported);
462  }
463  case LayerType::Gather:
464  return support.IsGatherSupported(infos[0],
465  infos[1],
466  infos[2],
467  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
468  reasonIfUnsupported);
469  case LayerType::GatherNd:
470  return support.IsGatherNdSupported(infos[0],
471  infos[1],
472  infos[2],
473  reasonIfUnsupported);
474  case LayerType::Input:
475  return support.IsInputSupported(infos[0], reasonIfUnsupported);
477  return support.IsInstanceNormalizationSupported(infos[0],
478  infos[1],
479  *(PolymorphicDowncast<const
480  InstanceNormalizationDescriptor*>(&descriptor)),
481  reasonIfUnsupported);
483  return support.IsL2NormalizationSupported(infos[0],
484  infos[1],
485  *(PolymorphicDowncast<const
486  L2NormalizationDescriptor*>(&descriptor)),
487  reasonIfUnsupported);
489  return support.IsLogicalBinarySupported(infos[0],
490  infos[1],
491  infos[2],
492  *(PolymorphicDowncast<const
493  LogicalBinaryDescriptor*>(&descriptor)),
494  reasonIfUnsupported);
496  return support.IsLogSoftmaxSupported(infos[0],
497  infos[1],
498  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
499  reasonIfUnsupported);
500  case LayerType::Lstm:
501  return support.IsLstmSupported(infos[0],
502  infos[1],
503  infos[2],
504  infos[3],
505  infos[4],
506  infos[5],
507  infos[6],
508  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
509  lstmParamsInfo.value(),
510  reasonIfUnsupported);
511  case LayerType::Map:
512  return true;
513  case LayerType::Maximum:
514  return support.IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
515  case LayerType::Mean:
516  return support.IsMeanSupported(infos[0],
517  infos[1],
518  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
519  reasonIfUnsupported);
520  case LayerType::MemCopy:
521  return support.IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
523  return support.IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
524  case LayerType::Merge:
525  return support.IsMergeSupported(infos[0],
526  infos[1],
527  infos[2],
528  reasonIfUnsupported);
529  case LayerType::Minimum:
530  return support.IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
532  return support.IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
534  return support.IsNormalizationSupported(infos[0],
535  infos[1],
536  *(PolymorphicDowncast<const
537  NormalizationDescriptor*>(&descriptor)),
538  reasonIfUnsupported);
539  case LayerType::Output:
540  return support.IsOutputSupported(infos[0], reasonIfUnsupported);
541  case LayerType::Pad:
542  return support.IsPadSupported(infos[0],
543  infos[1],
544  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
545  reasonIfUnsupported);
546  case LayerType::Permute:
547  return support.IsPermuteSupported(infos[0],
548  infos[1],
549  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
550  reasonIfUnsupported);
552  return support.IsPooling2dSupported(infos[0],
553  infos[1],
554  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
555  reasonIfUnsupported);
557  return support.IsPooling3dSupported(infos[0],
558  infos[1],
559  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
560  reasonIfUnsupported);
561  case LayerType::Prelu:
562  return support.IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
563  case LayerType::QLstm:
564  return support.IsQLstmSupported(infos[0],
565  infos[1],
566  infos[2],
567  infos[3],
568  infos[4],
569  infos[5],
570  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
571  lstmParamsInfo.value(),
572  reasonIfUnsupported);
573  case LayerType::Quantize:
574  return support.IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
576  return support.IsQuantizedLstmSupported(infos[0],
577  infos[1],
578  infos[2],
579  infos[3],
580  infos[4],
581  quantizedLstmParamsInfo.value(),
582  reasonIfUnsupported);
583  case LayerType::Rank:
584  return true;
585  case LayerType::Reshape:
586  return support.IsReshapeSupported(infos[0],
587  infos[1],
588  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
589  reasonIfUnsupported);
590  case LayerType::Resize:
591  return support.IsResizeSupported(infos[0],
592  infos[1],
593  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
594  reasonIfUnsupported);
595  case LayerType::Reduce:
596  return support.IsReduceSupported(infos[0],
597  infos[1],
598  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
599  reasonIfUnsupported);
601  return support.IsReverseV2Supported(infos[0],
602  infos[1],
603  infos[2],
604  reasonIfUnsupported);
605  case LayerType::Shape:
606  return support.IsShapeSupported(infos[0],
607  infos[1],
608  reasonIfUnsupported);
609  case LayerType::Slice:
610  return support.IsSliceSupported(infos[0],
611  infos[1],
612  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
613  reasonIfUnsupported);
614  case LayerType::Softmax:
615  return support.IsSoftmaxSupported(infos[0],
616  infos[1],
617  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
618  reasonIfUnsupported);
620  return support.IsSpaceToBatchNdSupported(infos[0],
621  infos[1],
622  *(PolymorphicDowncast<const
623  SpaceToBatchNdDescriptor*>(&descriptor)),
624  reasonIfUnsupported);
626  return support.IsSpaceToDepthSupported(infos[0],
627  infos[1],
628  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
629  reasonIfUnsupported);
630  case LayerType::Splitter:
631  {
632  std::vector<TensorInfo> outputInfos;
633  for (uint32_t i = 1; i < infos.size(); i++)
634  {
635  outputInfos.push_back(infos[i]);
636  }
637  return support.IsSplitterSupported(infos[0],
638  {outputInfos.begin(), outputInfos.end()},
639  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
640  reasonIfUnsupported);
641  }
642  case LayerType::Stack:
643  {
644  std::vector<const TensorInfo*> inputInfos;
645  for (uint32_t i = 0; i < infos.size() - 1; i++)
646  {
647  inputInfos.push_back(&infos[i]);
648  }
649  return support.IsStackSupported(inputInfos,
650  infos[infos.size() - 1],
651  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
652  reasonIfUnsupported);
653  }
655  return support.IsStridedSliceSupported(infos[0],
656  infos[1],
657  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
658  reasonIfUnsupported);
660  return support.IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
661  case LayerType::Tile:
662  return support.IsTileSupported(infos[0],
663  infos[1],
664  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
665  reasonIfUnsupported);
667  return support.IsTransposeSupported(infos[0],
668  infos[1],
669  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
670  reasonIfUnsupported);
672  {
673  if (infos.size() != 4)
674  {
675  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
676  "TensorInfos should be of format: {input, output, weights, biases}.");
677  }
678 
679  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
680  if (infos[3] == TensorInfo())
681  {
682  return support.IsTransposeConvolution2dSupported(infos[0],
683  infos[1],
684  desc,
685  infos[2],
686  EmptyOptional(),
687  reasonIfUnsupported);
688  }
689  else
690  {
691  return support.IsTransposeConvolution2dSupported(infos[0],
692  infos[1],
693  desc,
694  infos[2],
695  infos[3],
696  reasonIfUnsupported);
697  }
698  }
700  {
701  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
702  return support.IsUnidirectionalSequenceLstmSupported(infos[0],
703  infos[1],
704  infos[2],
705  infos[3],
706  infos[4],
707  infos[5],
708  desc,
709  lstmParamsInfo.value(),
710  reasonIfUnsupported);
711  }
712  case LayerType::Unmap:
713  return true;
714  default:
715  // layers not supported in neon by default:
716  // debug, fakequantization, precompiled,
717  // standin, switch
718  return false;
719  }
720 }
721 
723  const std::vector<TensorInfo>& infos,
724  const BaseDescriptor& descriptor,
725  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
726  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
727  Optional<std::string&> reasonIfUnsupported) const
728 {
729  bool isSupported = IsLayerTypeSupported(type,
730  infos,
731  descriptor,
732  lstmParamsInfo,
733  quantizedLstmParamsInfo,
734  reasonIfUnsupported,
735  *this);
736 
737  // For android-nn-driver and support library, to run FP16 operations on CpuAcc we need at least v8.2
738  // architecture. If the available architecture is older than v8.2, we can check if the operator is
739  // supported by changing operator inputs & outputs to be FP32.
740  // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
741  // to the parsers if the operator can supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
742  // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
743  // and, insert convert layers around the FP32 operator.
744  if (reasonIfUnsupported.has_value())
745  {
746  std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
747  if (!isSupported
748  && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
749  {
750  std::vector<TensorInfo> newInfos;
751  for (auto info: infos)
752  {
753  newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
754  }
755 
756  std::string tmpString;
757  return IsLayerTypeSupported(type,
758  newInfos,
759  descriptor,
760  lstmParamsInfo,
761  quantizedLstmParamsInfo,
762  tmpString,
763  *this);
764  }
765  }
766 
767  return isSupported;
768 }
769 
771  const TensorInfo& output,
772  const ActivationDescriptor& descriptor,
773  Optional<std::string&> reasonIfUnsupported) const
774 {
775  IgnoreUnused(descriptor);
777  reasonIfUnsupported,
778  input,
779  output,
780  descriptor);
781 }
782 
784  const TensorInfo& input1,
785  const TensorInfo& output,
786  Optional<std::string&> reasonIfUnsupported) const
787 {
789  reasonIfUnsupported,
790  input0,
791  input1,
792  output,
793  nullptr);
794 }
795 
797  const TensorInfo& output,
798  const ArgMinMaxDescriptor& descriptor,
799  Optional<std::string&> reasonIfUnsupported) const
800 {
802  reasonIfUnsupported,
803  input,
804  output,
805  descriptor);
806 }
807 
809  const TensorInfo& inputY,
810  const TensorInfo& output,
811  const BatchMatMulDescriptor& descriptor,
812  Optional<std::string&> reasonIfUnsupported) const
813 {
814  bool isFastMathEnabled = false;
815 #if defined(ARMCOMPUTENEON_ENABLED)
816  if (m_ModelContextPtr)
817  {
818  if (m_ModelContextPtr.get() != nullptr)
819  {
820  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
821  if (modelOptions)
822  {
823  isFastMathEnabled = modelOptions->IsFastMathEnabled();
824  }
825  }
826  }
827 #endif
829  reasonIfUnsupported,
830  inputX,
831  inputY,
832  output,
833  descriptor,
834  isFastMathEnabled,
835  nullptr);
836 }
837 
839  const TensorInfo& output,
840  const TensorInfo& mean,
841  const TensorInfo& var,
842  const TensorInfo& beta,
843  const TensorInfo& gamma,
844  const BatchNormalizationDescriptor& descriptor,
845  Optional<std::string&> reasonIfUnsupported) const
846 {
848  reasonIfUnsupported,
849  input,
850  output,
851  mean,
852  var,
853  beta,
854  gamma,
855  descriptor,
856  nullptr);
857 }
858 
860  const TensorInfo& output,
861  const BatchToSpaceNdDescriptor& descriptor,
862  Optional<std::string&> reasonIfUnsupported) const
863 {
865  reasonIfUnsupported,
866  input,
867  output,
868  descriptor);
869 }
870 
872  const TensorInfo& output,
873  Optional<std::string&> reasonIfUnsupported) const
874 {
876  reasonIfUnsupported,
877  input,
878  output);
879 }
880 
882  const TensorInfo& output,
883  const ChannelShuffleDescriptor& descriptor,
884  Optional<std::string&> reasonIfUnsupported) const
885 {
887  reasonIfUnsupported,
888  input,
889  output,
890  descriptor);
891 }
892 
894  const TensorInfo& input1,
895  const TensorInfo& output,
896  const ComparisonDescriptor& descriptor,
897  Optional<std::string&> reasonIfUnsupported) const
898 {
899 
901  reasonIfUnsupported,
902  input0,
903  input1,
904  output,
905  descriptor);
906 }
907 
// Checks whether Concat is supported on the NEON backend.
// Concat along width/height/channels is validated against the ACL workload;
// concat along the outermost (batch) axis is handled via sub-tensors, which
// requires every input to share type/quantization space with the output.
// Tensors with more than 4 dimensions are rejected.
// NOTE(review): the validate-call line inside the first branch was lost in
// this extracted view (orig line 922) — confirm against the full source.
908 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
909  const TensorInfo& output,
910  const OriginsDescriptor& descriptor,
911  Optional<std::string&> reasonIfUnsupported) const
912 {
// Reject an axis outside the tensor's rank up front.
913  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
914  {
915  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.")
916  return false;
917  }
918 
// Distance of the concat axis from the innermost dimension:
// 0..2 = width/height/channels, 3 = batch.
919  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
920  if(concatInnerAxis < 3) // Width, height, or channels
921  {
923  reasonIfUnsupported,
924  inputs,
925  output,
926  descriptor);
927  }
928  else if (concatInnerAxis == 3)
929  {
// Batch-axis concat uses sub-tensors, so all inputs must match the
// output's type and quantization parameters.
930  for (auto& input : inputs)
931  {
932  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
933  {
934  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
935  return false;
936  }
937  }
938  return true; // Sub-tensors support concat along batch
939  }
940  else // > 4 dimensions not supported.
941  {
942  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
943  return false;
944  }
945 }
946 
948  Optional<std::string&> reasonIfUnsupported) const
949 {
951  reasonIfUnsupported,
952  output);
953 }
954 
956  const TensorInfo& output,
957  Optional<std::string&> reasonIfUnsupported) const
958 {
960  reasonIfUnsupported,
961  input,
962  output);
963 }
964 
966  const TensorInfo& output,
967  Optional<std::string&> reasonIfUnsupported) const
968 {
970  reasonIfUnsupported,
971  input,
972  output);
973 }
974 
976  const TensorInfo& output,
977  const Convolution2dDescriptor& descriptor,
978  const TensorInfo& weights,
979  const Optional<TensorInfo>& biases,
980  Optional<std::string&> reasonIfUnsupported) const
981 {
982  bool isFastMathEnabled = false;
983 #if defined(ARMCOMPUTENEON_ENABLED)
984  if (m_ModelContextPtr)
985  {
986  if (m_ModelContextPtr.get() != nullptr)
987  {
988  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
989  if (modelOptions)
990  {
991  isFastMathEnabled = modelOptions->IsFastMathEnabled();
992  }
993  }
994  }
995 #endif
996 
998  reasonIfUnsupported,
999  input,
1000  output,
1001  descriptor,
1002  weights,
1003  biases,
1004  isFastMathEnabled,
1005  nullptr);
1006 }
1007 
1009  const TensorInfo& output,
1010  const Convolution3dDescriptor& descriptor,
1011  const TensorInfo& weights,
1012  const Optional<TensorInfo>& biases,
1013  Optional<std::string&> reasonIfUnsupported) const
1014 {
1015  bool isFastMathEnabled = false;
1016 #if defined(ARMCOMPUTENEON_ENABLED)
1017  if (m_ModelContextPtr)
1018  {
1019  if (m_ModelContextPtr.get() != nullptr)
1020  {
1021  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
1022  if (modelOptions)
1023  {
1024  isFastMathEnabled = modelOptions->IsFastMathEnabled();
1025  }
1026  }
1027  }
1028 #endif
1029 
1031  reasonIfUnsupported,
1032  input,
1033  output,
1034  descriptor,
1035  weights,
1036  biases,
1037  isFastMathEnabled,
1038  nullptr);
1039 }
1040 
1042  const TensorInfo& output,
1043  const DepthToSpaceDescriptor& descriptor,
1044  Optional<std::string&> reasonIfUnsupported) const
1045 {
1047  reasonIfUnsupported,
1048  input,
1049  output,
1050  descriptor);
1051 }
1052 
1054  const TensorInfo& output,
1055  const DepthwiseConvolution2dDescriptor& descriptor,
1056  const TensorInfo& weights,
1057  const Optional<TensorInfo>& biases,
1058  Optional<std::string&> reasonIfUnsupported) const
1059 {
1061  reasonIfUnsupported,
1062  input,
1063  output,
1064  descriptor,
1065  weights,
1066  biases,
1067  nullptr);
1068 }
1069 
1071  const TensorInfo& output,
1072  Optional<std::string&> reasonIfUnsupported) const
1073 {
1075  reasonIfUnsupported,
1076  input,
1077  output);
1078 }
1079 
1081  const TensorInfo& output,
1082  const DepthwiseConvolution2dDescriptor& descriptor,
1083  const TensorInfo& weights,
1084  const Optional<TensorInfo>& biases,
1085  Optional<std::string&> reasonIfUnsupported) const
1086 {
1088  reasonIfUnsupported,
1089  input,
1090  output,
1091  descriptor,
1092  weights,
1093  biases,
1094  nullptr);
1095 }
1096 
1098  const TensorInfo& output,
1099  const ElementwiseUnaryDescriptor& descriptor,
1100  Optional<std::string&> reasonIfUnsupported) const
1101 {
1102  switch(descriptor.m_Operation)
1103  {
1104  case UnaryOperation::Abs:
1106  reasonIfUnsupported,
1107  input,
1108  output);
1109  case UnaryOperation::Exp:
1111  reasonIfUnsupported,
1112  input,
1113  output);
1116  reasonIfUnsupported,
1117  input,
1118  output);
1119  case UnaryOperation::Log:
1121  reasonIfUnsupported,
1122  input,
1123  output);
1124  case UnaryOperation::Neg:
1126  reasonIfUnsupported,
1127  input,
1128  output);
1129  case UnaryOperation::Rsqrt:
1131  reasonIfUnsupported,
1132  input,
1133  output);
1134  case UnaryOperation::Sin:
1136  reasonIfUnsupported,
1137  input,
1138  output);
1139  case UnaryOperation::Sqrt:
1141  reasonIfUnsupported,
1142  input,
1143  output);
1144  default:
1145  return false;
1146  }
1147 }
1148 
1150  const TensorInfo& output,
1151  const FillDescriptor& descriptor,
1152  Optional<std::string&> reasonIfUnsupported) const
1153 {
1154  armnn::IgnoreUnused(input);
1155  armnn::IgnoreUnused(output);
1156  armnn::IgnoreUnused(descriptor);
1157 
1158  return IsNeonBackendSupported(reasonIfUnsupported);
1159 }
1160 
1162  const TensorInfo& output,
1163  Optional<std::string&> reasonIfUnsupported) const
1164 {
1165  armnn::IgnoreUnused(output);
1166  return IsNeonBackendSupported(reasonIfUnsupported) &&
1167  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1168  input.GetDataType(),
1169  &FalseFuncF16<>,
1170  &TrueFunc<>,
1171  &FalseFuncU8<>,
1172  &FalseFuncI32<>,
1173  &FalseFuncU8<>);
1174 }
1175 
1177  const TensorInfo& output,
1178  const TensorInfo& weights,
1179  const TensorInfo& biases,
1180  const FullyConnectedDescriptor& descriptor,
1181  Optional<std::string&> reasonIfUnsupported) const
1182 {
1184  reasonIfUnsupported,
1185  input,
1186  output,
1187  weights,
1188  biases,
1189  descriptor,
1190  nullptr);
1191 }
1192 
1193 bool NeonLayerSupport::IsFusedSupported(const std::vector<std::reference_wrapper<TensorInfo>>& inputs,
1194  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1195  const FusedDescriptor& descriptor,
1196  Optional<std::string&> reasonIfUnsupported) const
1197 {
1199  reasonIfUnsupported,
1200  inputs,
1201  outputs,
1202  descriptor,
1203  nullptr);
1204 }
1205 
1207  const TensorInfo& input1,
1208  const TensorInfo& output,
1209  const GatherDescriptor& descriptor,
1210  Optional<std::string&> reasonIfUnsupported) const
1211 {
1213  reasonIfUnsupported,
1214  input0,
1215  input1,
1216  output,
1217  descriptor);
1218 }
1219 
1221  const TensorInfo& input1,
1222  const TensorInfo& output,
1223  Optional<std::string&> reasonIfUnsupported) const
1224 {
1226  reasonIfUnsupported,
1227  input0,
1228  input1,
1229  output);
1230 }
1231 
1233  Optional<std::string&> reasonIfUnsupported) const
1234 {
1235  return IsNeonBackendSupported(reasonIfUnsupported, input);
1236 }
1237 
1239  const TensorInfo& output,
1240  const InstanceNormalizationDescriptor& descriptor,
1241  Optional<std::string&> reasonIfUnsupported) const
1242 {
1244  reasonIfUnsupported,
1245  input,
1246  output,
1247  descriptor);
1248 }
1249 
1251  const TensorInfo& output,
1252  const L2NormalizationDescriptor& descriptor,
1253  Optional<std::string&> reasonIfUnsupported) const
1254 {
1255  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1256 }
1257 
1259  const TensorInfo& input1,
1260  const TensorInfo& output,
1261  const LogicalBinaryDescriptor& descriptor,
1262  Optional<std::string&> reasonIfUnsupported) const
1263 {
1264  switch(descriptor.m_Operation)
1265  {
1268  reasonIfUnsupported,
1269  input0,
1270  input1,
1271  output);
1274  reasonIfUnsupported,
1275  input0,
1276  input1,
1277  output);
1278  default:
1279  return false;
1280  }
1281 }
1282 
1284  const TensorInfo& output,
1285  const LogSoftmaxDescriptor& descriptor,
1286  Optional<std::string&> reasonIfUnsupported) const
1287 {
1288  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1289 }
1290 
1292  const TensorInfo& outputStateIn,
1293  const TensorInfo& cellStateIn,
1294  const TensorInfo& scratchBuffer,
1295  const TensorInfo& outputStateOut,
1296  const TensorInfo& cellStateOut,
1297  const TensorInfo& output,
1298  const LstmDescriptor& descriptor,
1299  const LstmInputParamsInfo& paramsInfo,
1300  Optional<std::string&> reasonIfUnsupported) const
1301 {
1303  reasonIfUnsupported,
1304  input,
1305  outputStateIn,
1306  cellStateIn,
1307  scratchBuffer,
1308  outputStateOut,
1309  cellStateOut,
1310  output,
1311  descriptor,
1312  paramsInfo);
1313 }
1314 
1316  const TensorInfo& input1,
1317  const TensorInfo& output,
1318  Optional<std::string&> reasonIfUnsupported) const
1319 {
1321  reasonIfUnsupported,
1322  input0,
1323  input1,
1324  output);
1325 }
1326 
1328  const TensorInfo& output,
1329  const MeanDescriptor& descriptor,
1330  Optional<std::string&> reasonIfUnsupported) const
1331 {
1333  reasonIfUnsupported,
1334  input,
1335  output,
1336  descriptor);
1337 }
1338 
1340  const TensorInfo& input1,
1341  const TensorInfo& output,
1342  Optional<std::string&> reasonIfUnsupported) const
1343 {
1345  reasonIfUnsupported,
1346  input0,
1347  input1,
1348  output);
1349 }
1350 
1352  const TensorInfo& input1,
1353  const TensorInfo& output,
1354  Optional<std::string&> reasonIfUnsupported) const
1355 {
1357  reasonIfUnsupported,
1358  input0,
1359  input1,
1360  output,
1361  nullptr);
1362 }
1363 
1365  const TensorInfo& input1,
1366  const TensorInfo& output,
1367  Optional<std::string&> reasonIfUnsupported) const
1368 {
1370  reasonIfUnsupported,
1371  input0,
1372  input1,
1373  output,
1374  nullptr);
1375 }
1376 
1378  const TensorInfo& output,
1379  const NormalizationDescriptor& descriptor,
1380  Optional<std::string&> reasonIfUnsupported) const
1381 {
1383  reasonIfUnsupported,
1384  input,
1385  output,
1386  descriptor);
1387 }
1388 
1390  Optional<std::string&> reasonIfUnsupported) const
1391 {
1392  return IsNeonBackendSupported(reasonIfUnsupported, output);
1393 }
1394 
1396  const TensorInfo& output,
1397  const PadDescriptor& descriptor,
1398  Optional<std::string&> reasonIfUnsupported) const
1399 {
1401  reasonIfUnsupported,
1402  input,
1403  output,
1404  descriptor);
1405 }
1406 
1408  const TensorInfo& output,
1409  const PermuteDescriptor& descriptor,
1410  Optional<std::string&> reasonIfUnsupported) const
1411 {
1412  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1413 }
1414 
1416  const TensorInfo& output,
1417  const Pooling2dDescriptor& descriptor,
1418  Optional<std::string&> reasonIfUnsupported) const
1419 {
1420  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1421 }
1422 
1424  const TensorInfo& output,
1425  const Pooling3dDescriptor& descriptor,
1426  Optional<std::string&> reasonIfUnsupported) const
1427 {
1428  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1429 }
1430 
1432  const armnn::TensorInfo &alpha,
1433  const armnn::TensorInfo &output,
1434  armnn::Optional<std::string &> reasonIfUnsupported) const
1435 {
1436  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1437 }
1438 
1440  const TensorInfo& previousOutputIn,
1441  const TensorInfo& previousCellStateIn,
1442  const TensorInfo& outputStateOut,
1443  const TensorInfo& cellStateOut,
1444  const TensorInfo& output,
1445  const QLstmDescriptor& descriptor,
1446  const LstmInputParamsInfo& paramsInfo,
1447  Optional<std::string&> reasonIfUnsupported) const
1448 {
1449  // Check required here in order to pass IsLayerSupported for datatypes tests
1450  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1451  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1452  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1453  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1454  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1456  {
1458  reasonIfUnsupported,
1459  input,
1460  previousCellStateIn,
1461  previousOutputIn,
1462  cellStateOut,
1463  outputStateOut,
1464  output,
1465  descriptor,
1466  paramsInfo);
1467  }
1468  else
1469  {
1470  return false;
1471  }
1472 }
1473 
1475  const TensorInfo& output,
1476  Optional<std::string&> reasonIfUnsupported) const
1477 {
1479  reasonIfUnsupported,
1480  input,
1481  output);
1482 }
1483 
1485  const TensorInfo& cellStateIn,
1486  const TensorInfo& outputStateIn,
1487  const TensorInfo& cellStateOut,
1488  const TensorInfo& outputStateOut,
1489  const QuantizedLstmInputParamsInfo& paramsInfo,
1490  Optional<std::string&> reasonIfUnsupported) const
1491 {
1493  reasonIfUnsupported,
1494  input,
1495  cellStateIn,
1496  outputStateIn,
1497  cellStateOut,
1498  outputStateOut,
1499  paramsInfo);
1500 }
1501 
1503  const TensorInfo& output,
1504  const ReduceDescriptor& descriptor,
1505  Optional<std::string&> reasonIfUnsupported) const
1506 {
1508  reasonIfUnsupported,
1509  input,
1510  output,
1511  descriptor);
1512 }
1513 
1515  const TensorInfo& output,
1516  const ReshapeDescriptor& descriptor,
1517  Optional<std::string&> reasonIfUnsupported) const
1518 {
1519  armnn::IgnoreUnused(descriptor);
1521  reasonIfUnsupported,
1522  input,
1523  output);
1524 }
1525 
1527  const TensorInfo& output,
1528  const ResizeDescriptor& descriptor,
1529  Optional<std::string&> reasonIfUnsupported) const
1530 {
1532  reasonIfUnsupported,
1533  input,
1534  output,
1535  descriptor);
1536 }
1537 
1539  const armnn::TensorInfo &axis,
1540  const armnn::TensorInfo &output,
1541  Optional<std::string &> reasonIfUnsupported) const
1542 {
1544  reasonIfUnsupported,
1545  input,
1546  axis,
1547  output);
1548 }
1549 
1551  const TensorInfo& output,
1552  const SliceDescriptor& descriptor,
1553  Optional<std::string&> reasonIfUnsupported) const
1554 {
1556  reasonIfUnsupported,
1557  input,
1558  output,
1559  descriptor);
1560 }
1561 
1563  const TensorInfo& output,
1564  const SoftmaxDescriptor& descriptor,
1565  Optional<std::string&> reasonIfUnsupported) const
1566 {
1567  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1568 }
1569 
1571  const TensorInfo& output,
1572  const SpaceToBatchNdDescriptor& descriptor,
1573  Optional<std::string&> reasonIfUnsupported) const
1574 {
1576  reasonIfUnsupported,
1577  input,
1578  output,
1579  descriptor);
1580 }
1581 
1583  const TensorInfo& output,
1584  const SpaceToDepthDescriptor& descriptor,
1585  Optional<std::string&> reasonIfUnsupported) const
1586 {
1588  reasonIfUnsupported,
1589  input,
1590  output,
1591  descriptor);
1592 }
1593 
1595  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1596  const ViewsDescriptor& descriptor,
1597  Optional<std::string&> reasonIfUnsupported) const
1598 {
1599 #if defined(ARMCOMPUTENEON_ENABLED)
1600  // Split along the last dimension, cannot use sub-tensors
1601  // as width and height of the sub-tensors do not match
1602  // the width and height of the parent tensor
1603  // in case of input with more than 2D.
1604  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1605  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1606  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1607  {
1609  reasonIfUnsupported,
1610  input,
1611  outputs,
1612  *splitAxis.begin());
1613  }
1614 #endif
1615  IgnoreUnused(descriptor);
1616  for (auto output : outputs)
1617  {
1618  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1619  {
1620  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1621  return false;
1622  }
1623  }
1624  return true;
1625 }
1626 
1627 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1628  const TensorInfo& output,
1629  const StackDescriptor& descriptor,
1630  Optional<std::string&> reasonIfUnsupported) const
1631 {
1633  reasonIfUnsupported,
1634  inputs,
1635  output,
1636  descriptor);
1637 }
1638 
1640  const TensorInfo& output,
1641  const StridedSliceDescriptor& descriptor,
1642  Optional<std::string&> reasonIfUnsupported) const
1643 {
1645  reasonIfUnsupported,
1646  input,
1647  output,
1648  descriptor);
1649 }
1650 
1652  const TensorInfo& input1,
1653  const TensorInfo& output,
1654  Optional<std::string&> reasonIfUnsupported) const
1655 {
1657  reasonIfUnsupported,
1658  input0,
1659  input1,
1660  output,
1661  nullptr);
1662 }
1663 
1665  const TensorInfo& output,
1666  const TileDescriptor& descriptor,
1667  Optional<std::string&> reasonIfUnsupported) const
1668 {
1670  reasonIfUnsupported,
1671  input,
1672  output,
1673  descriptor);
1674 }
1675 
1677  const TensorInfo& output,
1678  const TransposeConvolution2dDescriptor& descriptor,
1679  const TensorInfo& weights,
1680  const Optional<TensorInfo>& biases,
1681  Optional<std::string&> reasonIfUnsupported) const
1682 {
1684  reasonIfUnsupported,
1685  input,
1686  output,
1687  descriptor,
1688  weights,
1689  biases);
1690 }
1691 
1693  const TensorInfo& output,
1694  const TransposeDescriptor& descriptor,
1695  Optional<std::string&> reasonIfUnsupported) const
1696 {
1697  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1698 }
1699 
1701  const TensorInfo& outputStateIn,
1702  const TensorInfo& cellStateIn,
1703  const TensorInfo& outputStateOut,
1704  const TensorInfo& cellStateOut,
1705  const TensorInfo& output,
1706  const UnidirectionalSequenceLstmDescriptor& descriptor,
1707  const LstmInputParamsInfo& paramsInfo,
1708  Optional<std::string&> reasonIfUnsupported) const
1709 {
1710  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1711  outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1712  cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1713  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1714  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1716  {
1718  reasonIfUnsupported,
1719  input,
1720  outputStateIn,
1721  cellStateIn,
1722  outputStateOut,
1723  cellStateOut,
1724  output,
1725  descriptor,
1726  paramsInfo);
1727  }
1728  else
1729  {
1731  reasonIfUnsupported,
1732  input,
1733  outputStateIn,
1734  cellStateIn,
1735  outputStateOut,
1736  cellStateOut,
1737  output,
1738  descriptor,
1739  paramsInfo);
1740  }
1741 }
1742 
1743 } // namespace armnn
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::NeonLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:783
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
NeonConcatWorkload.hpp
armnn::NeonMinimumWorkloadValidate
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validation function that checks whether the inputs and output are supported.
Definition: NeonMinimumWorkload.cpp:15
NeonComparisonWorkload.hpp
armnn::NeonFullyConnectedWorkloadValidate
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFullyConnectedWorkload.cpp:24
armnn::NeonSpaceToBatchNdWorkloadValidate
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: NeonSpaceToBatchNdWorkload.cpp:15
armnn::NeonLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1070
NeonConvertFp16ToFp32Workload.hpp
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::BinaryOperation::Mul
@ Mul
armnn::NeonReverseV2WorkloadValidate
arm_compute::Status NeonReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)
Definition: NeonReverseV2Workload.cpp:14
NeonAbsWorkload.hpp
NeonNegWorkload.hpp
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::NeonTileWorkloadValidate
arm_compute::Status NeonTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: NeonTileWorkload.cpp:14
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::NeonConvertFp32ToFp16WorkloadValidate
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp32ToFp16Workload.cpp:21
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
armnn::NeonAdditionWorkloadValidate
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonAdditionWorkload.cpp:20
NeonSoftmaxWorkload.hpp
armnn::NeonLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1570
NeonExpWorkload.hpp
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
armnn::Optional
Definition: Optional.hpp:270
armnn::NeonAbsWorkloadValidate
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonAbsWorkload.cpp:17
armnn::NeonMultiplicationWorkloadValidate
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonMultiplicationWorkload.cpp:19
NeonStridedSliceWorkload.hpp
armnn::NeonLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1407
NeonNormalizationFloatWorkload.hpp
armnn::NeonBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: NeonBackendModelContext.cpp:53
armnn::NeonStackWorkloadValidate
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: NeonStackWorkload.cpp:27
NeonReverseV2Workload.hpp
armnn::NeonPooling3dWorkloadValidate
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: NeonPooling3dWorkload.cpp:15
armnn::NeonLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1627
NeonFusedWorkload.hpp
NeonAdditionWorkload.hpp
NeonMeanWorkload.hpp
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
WorkloadUtils.hpp
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::NeonNegWorkloadValidate
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonNegWorkload.cpp:17
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::NeonQLstmWorkloadValidate
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonQLstmWorkload.cpp:243
armnn::NeonGatherWorkloadValidate
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: NeonGatherWorkload.cpp:13
armnn::NeonConvolution3dWorkloadValidate
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution3dWorkload.cpp:24
armnn::NeonSubtractionWorkloadValidate
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonSubtractionWorkload.cpp:22
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::NeonInstanceNormalizationWorkloadValidate
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: NeonInstanceNormalizationWorkload.cpp:19
NeonUnidirectionalSequenceLstmFloatWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::NeonBackendModelContext
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
Definition: NeonBackendModelContext.hpp:19
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::NeonMeanWorkloadValidate
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: NeonMeanWorkload.cpp:18
armnn::NeonLayerSupport
Definition: NeonLayerSupport.hpp:14
armnn::NeonLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:808
armnn::NeonLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1423
armnn::NeonLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:965
armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmFloatWorkload.cpp:510
NeonDivisionWorkload.hpp
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::NeonLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1431
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnn::NeonMaximumWorkloadValidate
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonMaximumWorkload.cpp:14
NeonPermuteWorkload.hpp
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BinaryOperation::Sub
@ Sub
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
NeonBatchNormalizationWorkload.hpp
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
armnn::DataType::Float32
@ Float32
NeonPadWorkload.hpp
armnn::NeonLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1351
NeonQuantizeWorkload.hpp
NeonConvolution3dWorkload.hpp
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
NeonChannelShuffleWorkload.hpp
armnn::NeonResizeWorkloadValidate
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: NeonResizeWorkload.cpp:22
armnn::LayerType::Tile
@ Tile
armnn::NeonExpWorkloadValidate
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonExpWorkload.cpp:17
NeonLogicalAndWorkload.hpp
NeonConvolution2dWorkload.hpp
NeonPooling2dWorkload.hpp
armnn::NeonLayerSupport::IsFusedSupported
bool IsFusedSupported(const std::vector< std::reference_wrapper< TensorInfo >> &inputs, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const FusedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1193
armnn::NeonPermuteWorkloadValidate
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: NeonPermuteWorkload.cpp:15
armnn::LayerType::Stack
@ Stack
armnn::NeonLogicalNotWorkloadValidate
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogicalNotWorkload.cpp:19
BackendRegistry.hpp
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
armnn::NeonLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:947
IgnoreUnused.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::NeonLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1258
armnn::NeonBatchMatMulValidate
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchMatMulWorkload.cpp:19
armnn::NeonLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:859
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Reduce
@ Reduce
NeonSpaceToDepthWorkload.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::DataType::QSymmS16
@ QSymmS16
armnn::NeonConvertFp16ToFp32WorkloadValidate
arm_compute::Status NeonConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp16ToFp32Workload.cpp:19
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
NumericCast.hpp
NeonReshapeWorkload.hpp
armnn::NeonLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1514
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::NeonLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:838
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::NeonLogWorkloadValidate
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogWorkload.cpp:17
armnn::NeonArgMinMaxWorkloadValidate
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: NeonArgMinMaxWorkload.cpp:31
armnn::NeonLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1220
NeonArgMinMaxWorkload.hpp
armnn::NeonLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1562
armnn::NeonLogSoftmaxWorkloadValidate
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: NeonLogSoftmaxWorkload.cpp:19
armnn::NeonNormalizationWorkloadValidate
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: NeonNormalizationFloatWorkload.cpp:49
LayerSupportCommon.hpp
armnn::NeonLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1582
armnn::NeonLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
Default implementation of the ILayerSupport interface; backends should implement this as a switch statement over the layer type.
Definition: NeonLayerSupport.cpp:722
armnn::NeonConstantWorkloadValidate
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
Definition: NeonConstantWorkload.cpp:20
armnn::LayerType::Slice
@ Slice
NeonLogSoftmaxWorkload.hpp
armnn::NeonReduceWorkloadValidate
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: NeonReduceWorkload.cpp:19
armnn::BinaryOperation::Maximum
@ Maximum
armnn::FusedDescriptor
A FusedDescriptor for the FusedLayer.
Definition: Descriptors.hpp:944
armnn::NeonLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1526
armnn::NeonPadWorkloadValidate
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: NeonPadWorkload.cpp:59
NeonGatherWorkload.hpp
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::NeonGatherNdWorkloadValidate
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: NeonGatherNdWorkload.cpp:14
armnn::NeonConcatWorkloadValidate
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: NeonConcatWorkload.cpp:27
armnn::BinaryOperation::SqDiff
@ SqDiff
armnn::NeonLstmFloatWorkloadValidate
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonLstmFloatWorkload.cpp:253
armnn::NeonLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1502
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::NeonDivisionWorkloadValidate
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonDivisionWorkload.cpp:18
armnn::NeonLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1377
NeonPooling3dWorkload.hpp
armnn::NeonLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1550
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::NeonSqrtWorkloadValidate
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSqrtWorkload.cpp:18
armnn::NeonDepthToSpaceWorkloadValidate
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: NeonDepthToSpaceWorkload.cpp:19
armnn::NeonLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1651
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1534
NeonReduceWorkload.hpp
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::NeonLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input0, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1664
armnn::UnaryOperation::Exp
@ Exp
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
armnn::LayerSupportBase::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:85
NeonLogicalOrWorkload.hpp
armnn::NeonLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1474
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Softmax
@ Softmax
armnn::NeonTransposeWorkloadValidate
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: NeonTransposeWorkload.cpp:15
armnn::NeonSoftmaxWorkloadValidate
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: NeonSoftmaxWorkload.cpp:19
armnn::NeonLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1389
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
Definition: PolymorphicDowncast.hpp:74
NeonDepthToSpaceWorkload.hpp
armnn::NeonSpaceToDepthWorkloadValidate
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: NeonSpaceToDepthWorkload.cpp:19
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::NeonLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1339
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
NeonDepthwiseConvolutionWorkload.hpp
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Quantize
@ Quantize
armnn::NeonLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1395
NeonTransposeWorkload.hpp
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
armnn::NeonLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1041
armnn::NeonLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1692
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
armnn::LayerType::Addition
@ Addition
armnn::NeonPreluWorkloadValidate
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: NeonPreluWorkload.cpp:17
NeonBatchToSpaceNdWorkload.hpp
armnn::NeonDequantizeWorkloadValidate
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonDequantizeWorkload.cpp:22
ArmComputeUtils.hpp
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
armnn::NeonLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1484
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
NeonQLstmWorkload.hpp
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
NeonRsqrtWorkload.hpp
armnn::NeonLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:908
armnn::NeonLogicalAndWorkloadValidate
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalAndWorkload.cpp:18
NeonSubtractionWorkload.hpp
NeonCastWorkload.hpp
armnn::NeonLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1538
armnn::NeonDepthwiseConvolutionWorkloadValidate
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: NeonDepthwiseConvolutionWorkload.cpp:29
armnn::BoostLogSeverityMapping::info
@ info
armnn::BinaryOperation::Power
@ Power
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::NeonLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1176
armnn::NeonLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1097
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::NeonLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1053
armnn::LayerType::Division
@ Division
armnn::NeonQuantizedLstmWorkloadValidate
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: NeonQuantizedLstmWorkload.cpp:131
armnn::NeonActivationWorkloadValidate
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: NeonActivationWorkload.cpp:17
armnn::LayerType::Shape
@ Shape
armnn::NeonLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1149
NeonLogWorkload.hpp
NeonResizeWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::NeonQuantizeWorkloadValidate
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonQuantizeWorkload.cpp:18
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
NeonTransposeConvolution2dWorkload.hpp
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
NeonFullyConnectedWorkload.hpp
NeonSinWorkload.hpp
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::NeonBatchToSpaceNdWorkloadValidate
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: NeonBatchToSpaceNdWorkload.cpp:15
armnn::NeonCastValidate
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonCastWorkload.cpp:19
armnn::NeonLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:881
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:131
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
NeonInstanceNormalizationWorkload.hpp
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: NeonLayerSupport.cpp:166
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::NeonLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1250
Tensor.hpp
armnn::Status
Status
Definition: Types.hpp:42
armnn::NeonLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:796
armnn::NeonLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1238
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::LayerType::Reshape
@ Reshape
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::NeonLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1676
armnn::NeonSplitterWorkloadValidate
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: NeonSplitterWorkload.cpp:33
NeonSpaceToBatchNdWorkload.hpp
NeonConstantWorkload.hpp
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::NeonFusedWorkloadValidate
arm_compute::Status NeonFusedWorkloadValidate(const std::vector< std::reference_wrapper< TensorInfo >> &inputInfos, const std::vector< std::reference_wrapper< TensorInfo >> &outputInfos, const FusedDescriptor &fusedDescriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFusedWorkload.cpp:22
armnn::LayerType::Fill
@ Fill
armnn::NeonRsqrtWorkloadValidate
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonRsqrtWorkload.cpp:18
armnn::NeonUnidirectionalSequenceLstmWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmWorkload.cpp:491
armnn::NeonReshapeWorkloadValidate
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonReshapeWorkload.cpp:17
armnn::LayerType::L2Normalization
@ L2Normalization
NeonLstmFloatWorkload.hpp
armnn::LayerType::Fused
@ Fused
armnn::NeonConvolution2dWorkloadValidate
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution2dWorkload.cpp:24
NeonMaximumWorkload.hpp
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::NeonBatchNormalizationValidate
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchNormalizationWorkload.cpp:24
armnn::NeonLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:871
NeonQuantizedLstmWorkload.hpp
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:307
armnn::LayerType::Minimum
@ Minimum
armnn::NeonSinWorkloadValidate
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSinWorkload.cpp:17
armnn::NeonLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1161
armnn::NeonLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1008
NeonBatchMatMulWorkload.hpp
NeonMinimumWorkload.hpp
armnn::NeonLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1232
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::NeonLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1327
armnn::NeonLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1206
NeonLogicalNotWorkload.hpp
armnn::BinaryOperation::Minimum
@ Minimum
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::NeonStridedSliceWorkloadValidate
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: NeonStridedSliceWorkload.cpp:19
armnn::LayerType::MemCopy
@ MemCopy
Exceptions.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
armnn::NeonLayerSupport::NeonLayerSupport
NeonLayerSupport()
Definition: NeonLayerSupport.cpp:179
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.
Definition: WorkloadUtils.cpp:377
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
NeonSqrtWorkload.hpp
armnn::LayerType::Rank
@ Rank
armnn::LayerType::Mean
@ Mean
ArmComputeTensorUtils.hpp
armnn::UnaryOperation::Abs
@ Abs
armnn::NeonLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:955
NeonStackWorkload.hpp
NeonBackendModelContext.hpp
armnn::NeonSliceWorkloadValidate
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: NeonSliceWorkload.cpp:21
NeonPreluWorkload.hpp
armnn::NeonLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1364
armnn::LayerType::Input
@ Input
armnn::NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1080
armnn::LayerType::Resize
@ Resize
NeonElementwiseBinaryWorkload.hpp
NeonSliceWorkload.hpp
armnn::NeonLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1315
NeonGatherNdWorkload.hpp
NeonTileWorkload.hpp
armnn::NeonElementwiseBinaryWorkloadValidate
arm_compute::Status NeonElementwiseBinaryWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonElementwiseBinaryWorkload.cpp:20
armnn::NeonPooling2dWorkloadValidate
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: NeonPooling2dWorkload.cpp:22
armnn::IsLayerTypeSupported
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)
Definition: NeonLayerSupport.cpp:184
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
armnn::NeonLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:893
armnn::NeonChannelShuffleValidate
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: NeonChannelShuffleWorkload.cpp:17
armnn::NeonL2NormalizationWorkloadValidate
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: NeonL2NormalizationFloatWorkload.cpp:19
armnn::NeonComparisonWorkloadValidate
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: NeonComparisonWorkload.cpp:16
armnn::BinaryOperation::Div
@ Div
NeonMultiplicationWorkload.hpp
armnn::LayerType::Convolution2d
@ Convolution2d
NeonUnidirectionalSequenceLstmWorkload.hpp
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
NeonL2NormalizationFloatWorkload.hpp
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::LayerType::Lstm
@ Lstm
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:98
armnn::LayerType::Dequantize
@ Dequantize
armnn::NeonLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1700
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::NeonLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1439
NeonConvertFp32ToFp16Workload.hpp
armnn::NeonLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:770
armnn::NeonLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1594
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::LayerType::Unmap
@ Unmap
armnn::NeonLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1283
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::LayerType::QLstm
@ QLstm
NeonDequantizeWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
NeonSplitterWorkload.hpp
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::NeonLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1639
armnn::NeonLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1291
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
NeonLayerSupport.hpp
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
NeonActivationWorkload.hpp
armnn::NeonLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1415
armnn::NeonTransposeConvolution2dWorkloadValidate
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: NeonTransposeConvolution2dWorkload.cpp:25
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::NeonLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:975
armnn::NeonLogicalOrWorkloadValidate
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalOrWorkload.cpp:18