ArmNN
 24.11
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
8 
9 #include <armnn/Exceptions.hpp>
10 #include <armnn/Tensor.hpp>
11 #include <armnn/Types.hpp>
13 
14 #include <LayerSupportCommon.hpp>
18 
19 #if defined(ARMCOMPUTENEON_ENABLED)
92 #endif
93 
94 namespace armnn
95 {
96 
97 namespace
98 {
99 
100 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
101 {
102  if (!type)
103  {
104  return info;
105  }
106  if (info.HasMultipleQuantizationScales())
107  {
108  return TensorInfo(info.GetShape(),
109  type.value(),
110  info.GetQuantizationScales(),
111  info.GetQuantizationDim().value(),
112  info.IsConstant());
113  }
114  else
115  {
116  return TensorInfo(info.GetShape(),
117  type.value(),
118  info.GetQuantizationScale(),
119  info.GetQuantizationOffset(),
120  info.IsConstant());
121  }
122 }
123 
124 template< typename ... Args>
125 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
126 {
127  IgnoreUnused(reasonIfUnsupported, (args)...);
128 #if defined(ARMCOMPUTENEON_ENABLED)
129  return true;
130 #else
131  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
132  return false;
133 #endif
134 }
135 
136 template<typename FloatFunc, typename Uint8Func, typename ... Params>
137 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
138  DataType dataType,
139  FloatFunc floatFuncPtr,
140  Uint8Func uint8FuncPtr,
141  Params&&... params)
142 {
143  return IsNeonBackendSupported(reasonIfUnsupported) &&
144  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
145  dataType,
146  floatFuncPtr,
147  floatFuncPtr,
148  uint8FuncPtr,
149  &FalseFunc<>,
150  &FalseFunc<>,
151  std::forward<Params>(params)...);
152 }
153 
154 #if defined(ARMCOMPUTENEON_ENABLED)
155 template<class FuncType, class... Args>
156 inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
157 {
158  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
159  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
160  if (!supported && reasonIfUnsupported)
161  {
162  reasonIfUnsupported.value() = aclStatus.error_description();
163  }
164  return supported;
165 }
166 
167 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
168  return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
169 #else
170 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
171  return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
172 #endif
173 } // anonymous namespace
174 
176  : m_ModelContextPtr(modelContextPtr)
177 {
178 }
179 
181  : m_ModelContextPtr(nullptr)
182 {
183 }
184 
186  const std::vector<TensorInfo>& infos,
187  const BaseDescriptor& descriptor,
188  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
189  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
190  Optional<std::string&> reasonIfUnsupported,
191  const NeonLayerSupport& support)
192 {
193  switch (type)
194  {
196  return support.IsActivationSupported(infos[0],
197  infos[1],
198  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
199  reasonIfUnsupported);
200  case LayerType::Addition:
201  return support.IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
203  return support.IsArgMinMaxSupported(infos[0],
204  infos[1],
205  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
206  reasonIfUnsupported);
208  return support.IsBatchMatMulSupported(infos[0],
209  infos[1],
210  infos[2],
211  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
212  reasonIfUnsupported);
214  return support.IsBatchNormalizationSupported(infos[0],
215  infos[1],
216  infos[2],
217  infos[3],
218  infos[4],
219  infos[5],
220  *(PolymorphicDowncast<const
221  BatchNormalizationDescriptor*>(&descriptor)),
222  reasonIfUnsupported);
224  return support.IsBatchToSpaceNdSupported(infos[0],
225  infos[1],
226  *(PolymorphicDowncast<const
227  BatchToSpaceNdDescriptor*>(&descriptor)),
228  reasonIfUnsupported);
229  case LayerType::Cast:
230  return support.IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
232  return support.IsChannelShuffleSupported(infos[0],
233  infos[1],
234  *(PolymorphicDowncast<const
235  ChannelShuffleDescriptor*>(&descriptor)),
236  reasonIfUnsupported);
238  return support.IsComparisonSupported(infos[0],
239  infos[1],
240  infos[2],
241  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
242  reasonIfUnsupported);
243  case LayerType::Concat:
244  {
245  std::vector<const TensorInfo*> inputInfos;
246  for (uint32_t i = 0; i < (infos.size() - 1); i++)
247  {
248  inputInfos.push_back(&infos[i]);
249  }
250  return support.IsConcatSupported(inputInfos,
251  infos[infos.size() - 1],
252  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
253  reasonIfUnsupported);
254  }
255  case LayerType::Constant:
256  return support.IsConstantSupported(infos[0], reasonIfUnsupported);
258  return support.IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
260  return support.IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
262  {
263  if (infos.size() != 4)
264  {
265  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
266  "TensorInfos should be of format: {input, output, weights, biases}.");
267  }
268 
269  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
270  if (infos[3] == TensorInfo())
271  {
272  return support.IsConvolution2dSupported(infos[0],
273  infos[1],
274  desc,
275  infos[2],
276  EmptyOptional(),
277  reasonIfUnsupported);
278  }
279  else
280  {
281  return support.IsConvolution2dSupported(infos[0],
282  infos[1],
283  desc,
284  infos[2],
285  infos[3],
286  reasonIfUnsupported);
287  }
288  }
290  {
291  if (infos.size() != 4)
292  {
293  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
294  "TensorInfos should be of format: {input, output, weights, biases}.");
295  }
296 
297  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
298  if (infos[3] == TensorInfo())
299  {
300  return support.IsConvolution3dSupported(infos[0],
301  infos[1],
302  desc,
303  infos[2],
304  EmptyOptional(),
305  reasonIfUnsupported);
306  }
307  else
308  {
309  return support.IsConvolution3dSupported(infos[0],
310  infos[1],
311  desc,
312  infos[2],
313  infos[3],
314  reasonIfUnsupported);
315  }
316  }
318  return support.IsDepthToSpaceSupported(infos[0],
319  infos[1],
320  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
321  reasonIfUnsupported);
323  {
324  if (infos.size() != 4)
325  {
326  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
327  "TensorInfos should be of format: {input, output, weights, biases}.");
328  }
329 
330  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
331  if (infos[3] == TensorInfo())
332  {
333  return support.IsDepthwiseConvolutionSupported(infos[0],
334  infos[1],
335  desc,
336  infos[2],
337  EmptyOptional(),
338  reasonIfUnsupported);
339  }
340  else
341  {
342  return support.IsDepthwiseConvolutionSupported(infos[0],
343  infos[1],
344  desc,
345  infos[2],
346  infos[3],
347  reasonIfUnsupported);
348  }
349  }
351  return support.IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
353  {
354  auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
355  return support.IsDetectionPostProcessSupported(infos[0],
356  infos[1],
357  infos[2],
358  infos[3],
359  infos[4],
360  infos[5],
361  infos[6],
362  desc,
363  reasonIfUnsupported);
364  }
365  case LayerType::Division:
366  return support.IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
368  {
369  auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
370 
371  switch (desc.m_Operation)
372  {
375  reasonIfUnsupported,
376  infos[0],
377  infos[1],
378  infos[2],
379  nullptr);
382  reasonIfUnsupported,
383  infos[0],
384  infos[1],
385  infos[2],
386  nullptr);
387 
390  reasonIfUnsupported,
391  infos[0],
392  infos[1],
393  infos[2],
394  nullptr);
397  reasonIfUnsupported,
398  infos[0],
399  infos[1],
400  infos[2]);
403  reasonIfUnsupported,
404  infos[0],
405  infos[1],
406  infos[2]);
409  reasonIfUnsupported,
410  infos[0],
411  infos[1],
412  infos[2],
413  nullptr);
417  reasonIfUnsupported,
418  infos[0],
419  infos[1],
420  infos[2],
421  desc,
422  nullptr);
425  reasonIfUnsupported,
426  infos[0],
427  infos[1],
428  infos[2],
429  nullptr);
430  default:
431  return false;
432  }
433  }
435  return support.IsElementwiseUnarySupported(infos[0],
436  infos[1],
437  *(PolymorphicDowncast<const
438  ElementwiseUnaryDescriptor*>(&descriptor)),
439  reasonIfUnsupported);
440  case LayerType::Fill:
441  return support.IsFillSupported(infos[0],
442  infos[1],
443  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
444  reasonIfUnsupported);
445  case LayerType::Floor:
446  return support.IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
448  return support.IsFullyConnectedSupported(infos[0],
449  infos[1],
450  infos[2],
451  infos[3],
452  *(PolymorphicDowncast<const
453  FullyConnectedDescriptor*>(&descriptor)),
454  reasonIfUnsupported);
455  case LayerType::Fused:
456  {
457  auto fusedDescriptor = *(PolymorphicDowncast<const FusedDescriptor*>(&descriptor));
458  if (fusedDescriptor.m_NumInputSlots + fusedDescriptor.m_NumOutputSlots != infos.size())
459  {
460  throw InvalidArgumentException("Invalid number of FusedLayer TensorInfos.");
461  }
462 
463  auto it = infos.begin() + numeric_cast<TensorInfo::DifferenceType>(fusedDescriptor.m_NumInputSlots);
464  std::vector<TensorInfo> inputInfos(infos.begin(), it);
465  std::vector<TensorInfo> outputInfos(it, infos.end());
466 
467  return support.IsFusedSupported({inputInfos.begin(), inputInfos.end()},
468  {outputInfos.begin(), outputInfos.end()},
469  fusedDescriptor,
470  reasonIfUnsupported);
471  }
472  case LayerType::Gather:
473  return support.IsGatherSupported(infos[0],
474  infos[1],
475  infos[2],
476  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
477  reasonIfUnsupported);
478  case LayerType::GatherNd:
479  return support.IsGatherNdSupported(infos[0],
480  infos[1],
481  infos[2],
482  reasonIfUnsupported);
483  case LayerType::Input:
484  return support.IsInputSupported(infos[0], reasonIfUnsupported);
486  return support.IsInstanceNormalizationSupported(infos[0],
487  infos[1],
488  *(PolymorphicDowncast<const
489  InstanceNormalizationDescriptor*>(&descriptor)),
490  reasonIfUnsupported);
492  return support.IsL2NormalizationSupported(infos[0],
493  infos[1],
494  *(PolymorphicDowncast<const
495  L2NormalizationDescriptor*>(&descriptor)),
496  reasonIfUnsupported);
498  return support.IsLogicalBinarySupported(infos[0],
499  infos[1],
500  infos[2],
501  *(PolymorphicDowncast<const
502  LogicalBinaryDescriptor*>(&descriptor)),
503  reasonIfUnsupported);
505  return support.IsLogSoftmaxSupported(infos[0],
506  infos[1],
507  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
508  reasonIfUnsupported);
509  case LayerType::Lstm:
510  return support.IsLstmSupported(infos[0],
511  infos[1],
512  infos[2],
513  infos[3],
514  infos[4],
515  infos[5],
516  infos[6],
517  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
518  lstmParamsInfo.value(),
519  reasonIfUnsupported);
520  case LayerType::Map:
521  return true;
522  case LayerType::Maximum:
523  return support.IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
524  case LayerType::Mean:
525  return support.IsMeanSupported(infos[0],
526  infos[1],
527  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
528  reasonIfUnsupported);
529  case LayerType::MemCopy:
530  return support.IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
532  return support.IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
533  case LayerType::Merge:
534  return support.IsMergeSupported(infos[0],
535  infos[1],
536  infos[2],
537  reasonIfUnsupported);
538  case LayerType::Minimum:
539  return support.IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
541  return support.IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
543  return support.IsNormalizationSupported(infos[0],
544  infos[1],
545  *(PolymorphicDowncast<const
546  NormalizationDescriptor*>(&descriptor)),
547  reasonIfUnsupported);
548  case LayerType::Output:
549  return support.IsOutputSupported(infos[0], reasonIfUnsupported);
550  case LayerType::Pad:
551  return support.IsPadSupported(infos[0],
552  infos[1],
553  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
554  reasonIfUnsupported);
555  case LayerType::Permute:
556  return support.IsPermuteSupported(infos[0],
557  infos[1],
558  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
559  reasonIfUnsupported);
561  return support.IsPooling2dSupported(infos[0],
562  infos[1],
563  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
564  reasonIfUnsupported);
566  return support.IsPooling3dSupported(infos[0],
567  infos[1],
568  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
569  reasonIfUnsupported);
570  case LayerType::Prelu:
571  return support.IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
572  case LayerType::QLstm:
573  return support.IsQLstmSupported(infos[0],
574  infos[1],
575  infos[2],
576  infos[3],
577  infos[4],
578  infos[5],
579  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
580  lstmParamsInfo.value(),
581  reasonIfUnsupported);
582  case LayerType::Quantize:
583  return support.IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
585  return support.IsQuantizedLstmSupported(infos[0],
586  infos[1],
587  infos[2],
588  infos[3],
589  infos[4],
590  quantizedLstmParamsInfo.value(),
591  reasonIfUnsupported);
592  case LayerType::Rank:
593  return true;
594  case LayerType::Reshape:
595  return support.IsReshapeSupported(infos[0],
596  infos[1],
597  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
598  reasonIfUnsupported);
599  case LayerType::Resize:
600  return support.IsResizeSupported(infos[0],
601  infos[1],
602  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
603  reasonIfUnsupported);
604  case LayerType::Reduce:
605  return support.IsReduceSupported(infos[0],
606  infos[1],
607  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
608  reasonIfUnsupported);
610  return support.IsReverseV2Supported(infos[0],
611  infos[1],
612  infos[2],
613  reasonIfUnsupported);
614  case LayerType::Shape:
615  return support.IsShapeSupported(infos[0],
616  infos[1],
617  reasonIfUnsupported);
618  case LayerType::Slice:
619  return support.IsSliceSupported(infos[0],
620  infos[1],
621  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
622  reasonIfUnsupported);
623  case LayerType::Softmax:
624  return support.IsSoftmaxSupported(infos[0],
625  infos[1],
626  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
627  reasonIfUnsupported);
629  return support.IsSpaceToBatchNdSupported(infos[0],
630  infos[1],
631  *(PolymorphicDowncast<const
632  SpaceToBatchNdDescriptor*>(&descriptor)),
633  reasonIfUnsupported);
635  return support.IsSpaceToDepthSupported(infos[0],
636  infos[1],
637  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
638  reasonIfUnsupported);
639  case LayerType::Splitter:
640  {
641  std::vector<TensorInfo> outputInfos;
642  for (uint32_t i = 1; i < infos.size(); i++)
643  {
644  outputInfos.push_back(infos[i]);
645  }
646  return support.IsSplitterSupported(infos[0],
647  {outputInfos.begin(), outputInfos.end()},
648  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
649  reasonIfUnsupported);
650  }
651  case LayerType::Stack:
652  {
653  std::vector<const TensorInfo*> inputInfos;
654  for (uint32_t i = 0; i < infos.size() - 1; i++)
655  {
656  inputInfos.push_back(&infos[i]);
657  }
658  return support.IsStackSupported(inputInfos,
659  infos[infos.size() - 1],
660  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
661  reasonIfUnsupported);
662  }
664  return support.IsStridedSliceSupported(infos[0],
665  infos[1],
666  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
667  reasonIfUnsupported);
669  return support.IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
670  case LayerType::Tile:
671  return support.IsTileSupported(infos[0],
672  infos[1],
673  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
674  reasonIfUnsupported);
676  return support.IsTransposeSupported(infos[0],
677  infos[1],
678  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
679  reasonIfUnsupported);
681  {
682  if (infos.size() != 4)
683  {
684  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
685  "TensorInfos should be of format: {input, output, weights, biases}.");
686  }
687 
688  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
689  if (infos[3] == TensorInfo())
690  {
691  return support.IsTransposeConvolution2dSupported(infos[0],
692  infos[1],
693  desc,
694  infos[2],
695  EmptyOptional(),
696  reasonIfUnsupported);
697  }
698  else
699  {
700  return support.IsTransposeConvolution2dSupported(infos[0],
701  infos[1],
702  desc,
703  infos[2],
704  infos[3],
705  reasonIfUnsupported);
706  }
707  }
709  {
710  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
711  return support.IsUnidirectionalSequenceLstmSupported(infos[0],
712  infos[1],
713  infos[2],
714  infos[3],
715  infos[4],
716  infos[5],
717  desc,
718  lstmParamsInfo.value(),
719  reasonIfUnsupported);
720  }
721  case LayerType::Unmap:
722  return true;
723  default:
724  // layers not supported in neon by default:
725  // debug, fakequantization, precompiled,
726  // standin, switch
727  return false;
728  }
729 }
730 
732  const std::vector<TensorInfo>& infos,
733  const BaseDescriptor& descriptor,
734  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
735  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
736  Optional<std::string&> reasonIfUnsupported) const
737 {
738  bool isSupported = IsLayerTypeSupported(type,
739  infos,
740  descriptor,
741  lstmParamsInfo,
742  quantizedLstmParamsInfo,
743  reasonIfUnsupported,
744  *this);
745 
746  // For android-nn-driver and support library, to run FP16 operations on CpuAcc we need at least v8.2
747  // architecture. If the available architecture is older than v8.2, we can check if the operator is
748  // supported by changing operator inputs & outputs to be FP32.
749  // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
750  // to the parsers if the operator can supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
751  // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
752  // and, insert convert layers around the FP32 operator.
753  if (reasonIfUnsupported.has_value())
754  {
755  std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
756  if (!isSupported
757  && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
758  {
759  std::vector<TensorInfo> newInfos;
760  for (auto info: infos)
761  {
762  newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
763  }
764 
765  std::string tmpString;
766  return IsLayerTypeSupported(type,
767  newInfos,
768  descriptor,
769  lstmParamsInfo,
770  quantizedLstmParamsInfo,
771  tmpString,
772  *this);
773  }
774  }
775 
776  return isSupported;
777 }
778 
780  const TensorInfo& output,
781  const ActivationDescriptor& descriptor,
782  Optional<std::string&> reasonIfUnsupported) const
783 {
784  IgnoreUnused(descriptor);
786  reasonIfUnsupported,
787  input,
788  output,
789  descriptor);
790 }
791 
793  const TensorInfo& input1,
794  const TensorInfo& output,
795  Optional<std::string&> reasonIfUnsupported) const
796 {
798  reasonIfUnsupported,
799  input0,
800  input1,
801  output,
802  nullptr);
803 }
804 
806  const TensorInfo& output,
807  const ArgMinMaxDescriptor& descriptor,
808  Optional<std::string&> reasonIfUnsupported) const
809 {
811  reasonIfUnsupported,
812  input,
813  output,
814  descriptor);
815 }
816 
818  const TensorInfo& inputY,
819  const TensorInfo& output,
820  const BatchMatMulDescriptor& descriptor,
821  Optional<std::string&> reasonIfUnsupported) const
822 {
823  bool isFastMathEnabled = false;
824 #if defined(ARMCOMPUTENEON_ENABLED)
825  if (m_ModelContextPtr)
826  {
827  if (m_ModelContextPtr.get() != nullptr)
828  {
829  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
830  if (modelOptions)
831  {
832  isFastMathEnabled = modelOptions->IsFastMathEnabled();
833  }
834  }
835  }
836 #endif
838  reasonIfUnsupported,
839  inputX,
840  inputY,
841  output,
842  descriptor,
843  isFastMathEnabled,
844  nullptr);
845 }
846 
848  const TensorInfo& output,
849  const TensorInfo& mean,
850  const TensorInfo& var,
851  const TensorInfo& beta,
852  const TensorInfo& gamma,
853  const BatchNormalizationDescriptor& descriptor,
854  Optional<std::string&> reasonIfUnsupported) const
855 {
857  reasonIfUnsupported,
858  input,
859  output,
860  mean,
861  var,
862  beta,
863  gamma,
864  descriptor,
865  nullptr);
866 }
867 
869  const TensorInfo& output,
870  const BatchToSpaceNdDescriptor& descriptor,
871  Optional<std::string&> reasonIfUnsupported) const
872 {
874  reasonIfUnsupported,
875  input,
876  output,
877  descriptor);
878 }
879 
881  const TensorInfo& output,
882  Optional<std::string&> reasonIfUnsupported) const
883 {
885  reasonIfUnsupported,
886  input,
887  output);
888 }
889 
891  const TensorInfo& output,
892  const ChannelShuffleDescriptor& descriptor,
893  Optional<std::string&> reasonIfUnsupported) const
894 {
896  reasonIfUnsupported,
897  input,
898  output,
899  descriptor);
900 }
901 
903  const TensorInfo& input1,
904  const TensorInfo& output,
905  const ComparisonDescriptor& descriptor,
906  Optional<std::string&> reasonIfUnsupported) const
907 {
908 
910  reasonIfUnsupported,
911  input0,
912  input1,
913  output,
914  descriptor);
915 }
916 
// Checks whether the NEON backend can execute a Concat over 'inputs' along the
// axis given by 'descriptor'. Rejects out-of-range axes, forwards width/height/
// channel concatenation to ACL validation, and allows batch-axis concatenation
// only when all inputs share the output's type space (sub-tensor requirement).
// NOTE(review): 'inputs' is taken by value, copying the pointer vector on every
// call — const& would avoid the copy, but the signature must match the header.
917 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
918  const TensorInfo& output,
919  const OriginsDescriptor& descriptor,
920  Optional<std::string&> reasonIfUnsupported) const
921 {
// The concat axis must index an existing dimension.
922  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
923  {
924  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.")
925  return false;
926  }
927 
// Convert the outer-counted axis into an inner-counted one (0 = innermost).
928  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
929  if(concatInnerAxis < 3) // Width, height, or channels
930  {
// Presumably forwards to the ACL concat validate via
// FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate, ...) — the macro
// head line is not visible in this extraction; confirm against upstream.
932  reasonIfUnsupported,
933  inputs,
934  output,
935  descriptor);
936  }
937  else if (concatInnerAxis == 3)
938  {
// Batch-axis concat is implemented with sub-tensors, so every (non-null) input
// must match the output's type and quantization space exactly.
939  for (auto& input : inputs)
940  {
941  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
942  {
943  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
944  return false;
945  }
946  }
947  return true; // Sub-tensors support concat along batch
948  }
949  else // > 4 dimensions not supported.
950  {
951  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
952  return false;
953  }
954 }
955 
957  Optional<std::string&> reasonIfUnsupported) const
958 {
960  reasonIfUnsupported,
961  output);
962 }
963 
965  const TensorInfo& output,
966  Optional<std::string&> reasonIfUnsupported) const
967 {
969  reasonIfUnsupported,
970  input,
971  output);
972 }
973 
975  const TensorInfo& output,
976  Optional<std::string&> reasonIfUnsupported) const
977 {
979  reasonIfUnsupported,
980  input,
981  output);
982 }
983 
985  const TensorInfo& output,
986  const Convolution2dDescriptor& descriptor,
987  const TensorInfo& weights,
988  const Optional<TensorInfo>& biases,
989  Optional<std::string&> reasonIfUnsupported) const
990 {
991  bool isFastMathEnabled = false;
992 #if defined(ARMCOMPUTENEON_ENABLED)
993  if (m_ModelContextPtr)
994  {
995  if (m_ModelContextPtr.get() != nullptr)
996  {
997  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
998  if (modelOptions)
999  {
1000  isFastMathEnabled = modelOptions->IsFastMathEnabled();
1001  }
1002  }
1003  }
1004 #endif
1005 
1007  reasonIfUnsupported,
1008  input,
1009  output,
1010  descriptor,
1011  weights,
1012  biases,
1013  isFastMathEnabled,
1014  nullptr);
1015 }
1016 
1018  const TensorInfo& output,
1019  const Convolution3dDescriptor& descriptor,
1020  const TensorInfo& weights,
1021  const Optional<TensorInfo>& biases,
1022  Optional<std::string&> reasonIfUnsupported) const
1023 {
1024  bool isFastMathEnabled = false;
1025 #if defined(ARMCOMPUTENEON_ENABLED)
1026  if (m_ModelContextPtr)
1027  {
1028  if (m_ModelContextPtr.get() != nullptr)
1029  {
1030  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
1031  if (modelOptions)
1032  {
1033  isFastMathEnabled = modelOptions->IsFastMathEnabled();
1034  }
1035  }
1036  }
1037 #endif
1038 
1040  reasonIfUnsupported,
1041  input,
1042  output,
1043  descriptor,
1044  weights,
1045  biases,
1046  isFastMathEnabled,
1047  nullptr);
1048 }
1049 
1051  const TensorInfo& output,
1052  const DepthToSpaceDescriptor& descriptor,
1053  Optional<std::string&> reasonIfUnsupported) const
1054 {
1056  reasonIfUnsupported,
1057  input,
1058  output,
1059  descriptor);
1060 }
1061 
1063  const TensorInfo& output,
1064  const DepthwiseConvolution2dDescriptor& descriptor,
1065  const TensorInfo& weights,
1066  const Optional<TensorInfo>& biases,
1067  Optional<std::string&> reasonIfUnsupported) const
1068 {
1070  reasonIfUnsupported,
1071  input,
1072  output,
1073  descriptor,
1074  weights,
1075  biases,
1076  nullptr);
1077 }
1078 
1080  const TensorInfo& output,
1081  Optional<std::string&> reasonIfUnsupported) const
1082 {
1084  reasonIfUnsupported,
1085  input,
1086  output);
1087 }
1088 
1090  const TensorInfo& output,
1091  const DepthwiseConvolution2dDescriptor& descriptor,
1092  const TensorInfo& weights,
1093  const Optional<TensorInfo>& biases,
1094  Optional<std::string&> reasonIfUnsupported) const
1095 {
1097  reasonIfUnsupported,
1098  input,
1099  output,
1100  descriptor,
1101  weights,
1102  biases,
1103  nullptr);
1104 }
1105 
1107  const TensorInfo& output,
1108  const ElementwiseUnaryDescriptor& descriptor,
1109  Optional<std::string&> reasonIfUnsupported) const
1110 {
1111  switch(descriptor.m_Operation)
1112  {
1113  case UnaryOperation::Abs:
1115  reasonIfUnsupported,
1116  input,
1117  output);
1118  case UnaryOperation::Exp:
1120  reasonIfUnsupported,
1121  input,
1122  output);
1125  reasonIfUnsupported,
1126  input,
1127  output);
1128  case UnaryOperation::Log:
1130  reasonIfUnsupported,
1131  input,
1132  output);
1133  case UnaryOperation::Neg:
1135  reasonIfUnsupported,
1136  input,
1137  output);
1138  case UnaryOperation::Rsqrt:
1140  reasonIfUnsupported,
1141  input,
1142  output);
1143  case UnaryOperation::Sin:
1145  reasonIfUnsupported,
1146  input,
1147  output);
1148  case UnaryOperation::Sqrt:
1150  reasonIfUnsupported,
1151  input,
1152  output);
1153  default:
1154  return false;
1155  }
1156 }
1157 
1159  const TensorInfo& output,
1160  const FillDescriptor& descriptor,
1161  Optional<std::string&> reasonIfUnsupported) const
1162 {
1163  armnn::IgnoreUnused(input);
1164  armnn::IgnoreUnused(output);
1165  armnn::IgnoreUnused(descriptor);
1166 
1167  return IsNeonBackendSupported(reasonIfUnsupported);
1168 }
1169 
1171  const TensorInfo& output,
1172  Optional<std::string&> reasonIfUnsupported) const
1173 {
1174  armnn::IgnoreUnused(output);
1175  return IsNeonBackendSupported(reasonIfUnsupported) &&
1176  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1177  input.GetDataType(),
1178  &FalseFuncF16<>,
1179  &TrueFunc<>,
1180  &FalseFuncU8<>,
1181  &FalseFuncI32<>,
1182  &FalseFuncU8<>);
1183 }
1184 
1186  const TensorInfo& output,
1187  const TensorInfo& weights,
1188  const TensorInfo& biases,
1189  const FullyConnectedDescriptor& descriptor,
1190  Optional<std::string&> reasonIfUnsupported) const
1191 {
1193  reasonIfUnsupported,
1194  input,
1195  output,
1196  weights,
1197  biases,
1198  descriptor,
1199  nullptr);
1200 }
1201 
// Checks whether the NEON backend supports a Fused layer with the given input
// and output tensor sets and fused-operation descriptor. Forwards everything
// (plus a null ActivationDescriptor slot) to a Neon validate function.
1202 bool NeonLayerSupport::IsFusedSupported(const std::vector<std::reference_wrapper<TensorInfo>>& inputs,
1203  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1204  const FusedDescriptor& descriptor,
1205  Optional<std::string&> reasonIfUnsupported) const
1206 {
// Presumably FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFusedWorkloadValidate, ...) —
// the macro head line is not visible in this extraction; confirm upstream.
1208  reasonIfUnsupported,
1209  inputs,
1210  outputs,
1211  descriptor,
1212  nullptr);
1213 }
1214 
1216  const TensorInfo& input1,
1217  const TensorInfo& output,
1218  const GatherDescriptor& descriptor,
1219  Optional<std::string&> reasonIfUnsupported) const
1220 {
1222  reasonIfUnsupported,
1223  input0,
1224  input1,
1225  output,
1226  descriptor);
1227 }
1228 
1230  const TensorInfo& input1,
1231  const TensorInfo& output,
1232  Optional<std::string&> reasonIfUnsupported) const
1233 {
1235  reasonIfUnsupported,
1236  input0,
1237  input1,
1238  output);
1239 }
1240 
1242  Optional<std::string&> reasonIfUnsupported) const
1243 {
1244  return IsNeonBackendSupported(reasonIfUnsupported, input);
1245 }
1246 
1248  const TensorInfo& output,
1249  const InstanceNormalizationDescriptor& descriptor,
1250  Optional<std::string&> reasonIfUnsupported) const
1251 {
1253  reasonIfUnsupported,
1254  input,
1255  output,
1256  descriptor);
1257 }
1258 
1260  const TensorInfo& output,
1261  const L2NormalizationDescriptor& descriptor,
1262  Optional<std::string&> reasonIfUnsupported) const
1263 {
1264  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1265 }
1266 
1268  const TensorInfo& input1,
1269  const TensorInfo& output,
1270  const LogicalBinaryDescriptor& descriptor,
1271  Optional<std::string&> reasonIfUnsupported) const
1272 {
1273  switch(descriptor.m_Operation)
1274  {
1277  reasonIfUnsupported,
1278  input0,
1279  input1,
1280  output);
1283  reasonIfUnsupported,
1284  input0,
1285  input1,
1286  output);
1287  default:
1288  return false;
1289  }
1290 }
1291 
1293  const TensorInfo& output,
1294  const LogSoftmaxDescriptor& descriptor,
1295  Optional<std::string&> reasonIfUnsupported) const
1296 {
1297  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1298 }
1299 
1301  const TensorInfo& outputStateIn,
1302  const TensorInfo& cellStateIn,
1303  const TensorInfo& scratchBuffer,
1304  const TensorInfo& outputStateOut,
1305  const TensorInfo& cellStateOut,
1306  const TensorInfo& output,
1307  const LstmDescriptor& descriptor,
1308  const LstmInputParamsInfo& paramsInfo,
1309  Optional<std::string&> reasonIfUnsupported) const
1310 {
1312  reasonIfUnsupported,
1313  input,
1314  outputStateIn,
1315  cellStateIn,
1316  scratchBuffer,
1317  outputStateOut,
1318  cellStateOut,
1319  output,
1320  descriptor,
1321  paramsInfo);
1322 }
1323 
1325  const TensorInfo& input1,
1326  const TensorInfo& output,
1327  Optional<std::string&> reasonIfUnsupported) const
1328 {
1330  reasonIfUnsupported,
1331  input0,
1332  input1,
1333  output);
1334 }
1335 
1337  const TensorInfo& output,
1338  const MeanDescriptor& descriptor,
1339  Optional<std::string&> reasonIfUnsupported) const
1340 {
1342  reasonIfUnsupported,
1343  input,
1344  output,
1345  descriptor);
1346 }
1347 
1349  const TensorInfo& input1,
1350  const TensorInfo& output,
1351  Optional<std::string&> reasonIfUnsupported) const
1352 {
1354  reasonIfUnsupported,
1355  input0,
1356  input1,
1357  output);
1358 }
1359 
1361  const TensorInfo& input1,
1362  const TensorInfo& output,
1363  Optional<std::string&> reasonIfUnsupported) const
1364 {
1366  reasonIfUnsupported,
1367  input0,
1368  input1,
1369  output,
1370  nullptr);
1371 }
1372 
1374  const TensorInfo& input1,
1375  const TensorInfo& output,
1376  Optional<std::string&> reasonIfUnsupported) const
1377 {
1379  reasonIfUnsupported,
1380  input0,
1381  input1,
1382  output,
1383  nullptr);
1384 }
1385 
1387  const TensorInfo& output,
1388  const NormalizationDescriptor& descriptor,
1389  Optional<std::string&> reasonIfUnsupported) const
1390 {
1392  reasonIfUnsupported,
1393  input,
1394  output,
1395  descriptor);
1396 }
1397 
1399  Optional<std::string&> reasonIfUnsupported) const
1400 {
1401  return IsNeonBackendSupported(reasonIfUnsupported, output);
1402 }
1403 
1405  const TensorInfo& output,
1406  const PadDescriptor& descriptor,
1407  Optional<std::string&> reasonIfUnsupported) const
1408 {
1410  reasonIfUnsupported,
1411  input,
1412  output,
1413  descriptor);
1414 }
1415 
1417  const TensorInfo& output,
1418  const PermuteDescriptor& descriptor,
1419  Optional<std::string&> reasonIfUnsupported) const
1420 {
1421  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1422 }
1423 
1425  const TensorInfo& output,
1426  const Pooling2dDescriptor& descriptor,
1427  Optional<std::string&> reasonIfUnsupported) const
1428 {
1429  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1430 }
1431 
1433  const TensorInfo& output,
1434  const Pooling3dDescriptor& descriptor,
1435  Optional<std::string&> reasonIfUnsupported) const
1436 {
1437  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1438 }
1439 
1441  const armnn::TensorInfo &alpha,
1442  const armnn::TensorInfo &output,
1443  armnn::Optional<std::string &> reasonIfUnsupported) const
1444 {
1445  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1446 }
1447 
1449  const TensorInfo& previousOutputIn,
1450  const TensorInfo& previousCellStateIn,
1451  const TensorInfo& outputStateOut,
1452  const TensorInfo& cellStateOut,
1453  const TensorInfo& output,
1454  const QLstmDescriptor& descriptor,
1455  const LstmInputParamsInfo& paramsInfo,
1456  Optional<std::string&> reasonIfUnsupported) const
1457 {
1458  // Check required here in order to pass IsLayerSupported for datatypes tests
1459  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1460  previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1461  previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1462  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1463  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1465  {
1467  reasonIfUnsupported,
1468  input,
1469  previousCellStateIn,
1470  previousOutputIn,
1471  cellStateOut,
1472  outputStateOut,
1473  output,
1474  descriptor,
1475  paramsInfo);
1476  }
1477  else
1478  {
1479  return false;
1480  }
1481 }
1482 
1484  const TensorInfo& output,
1485  Optional<std::string&> reasonIfUnsupported) const
1486 {
1488  reasonIfUnsupported,
1489  input,
1490  output);
1491 }
1492 
1494  const TensorInfo& cellStateIn,
1495  const TensorInfo& outputStateIn,
1496  const TensorInfo& cellStateOut,
1497  const TensorInfo& outputStateOut,
1498  const QuantizedLstmInputParamsInfo& paramsInfo,
1499  Optional<std::string&> reasonIfUnsupported) const
1500 {
1502  reasonIfUnsupported,
1503  input,
1504  cellStateIn,
1505  outputStateIn,
1506  cellStateOut,
1507  outputStateOut,
1508  paramsInfo);
1509 }
1510 
1512  const TensorInfo& output,
1513  const ReduceDescriptor& descriptor,
1514  Optional<std::string&> reasonIfUnsupported) const
1515 {
1517  reasonIfUnsupported,
1518  input,
1519  output,
1520  descriptor);
1521 }
1522 
1524  const TensorInfo& output,
1525  const ReshapeDescriptor& descriptor,
1526  Optional<std::string&> reasonIfUnsupported) const
1527 {
1528  armnn::IgnoreUnused(descriptor);
1530  reasonIfUnsupported,
1531  input,
1532  output);
1533 }
1534 
1536  const TensorInfo& output,
1537  const ResizeDescriptor& descriptor,
1538  Optional<std::string&> reasonIfUnsupported) const
1539 {
1541  reasonIfUnsupported,
1542  input,
1543  output,
1544  descriptor);
1545 }
1546 
1548  const armnn::TensorInfo &axis,
1549  const armnn::TensorInfo &output,
1550  Optional<std::string &> reasonIfUnsupported) const
1551 {
1553  reasonIfUnsupported,
1554  input,
1555  axis,
1556  output);
1557 }
1558 
1560  const TensorInfo& output,
1561  const SliceDescriptor& descriptor,
1562  Optional<std::string&> reasonIfUnsupported) const
1563 {
1565  reasonIfUnsupported,
1566  input,
1567  output,
1568  descriptor);
1569 }
1570 
1572  const TensorInfo& output,
1573  const SoftmaxDescriptor& descriptor,
1574  Optional<std::string&> reasonIfUnsupported) const
1575 {
1576  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1577 }
1578 
1580  const TensorInfo& output,
1581  const SpaceToBatchNdDescriptor& descriptor,
1582  Optional<std::string&> reasonIfUnsupported) const
1583 {
1585  reasonIfUnsupported,
1586  input,
1587  output,
1588  descriptor);
1589 }
1590 
1592  const TensorInfo& output,
1593  const SpaceToDepthDescriptor& descriptor,
1594  Optional<std::string&> reasonIfUnsupported) const
1595 {
1597  reasonIfUnsupported,
1598  input,
1599  output,
1600  descriptor);
1601 }
1602 
1604  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1605  const ViewsDescriptor& descriptor,
1606  Optional<std::string&> reasonIfUnsupported) const
1607 {
1608 #if defined(ARMCOMPUTENEON_ENABLED)
1609  // Split along the last dimension, cannot use sub-tensors
1610  // as width and height of the sub-tensors do not match
1611  // the width and height of the parent tensor
1612  // in case of input with more than 2D.
1613  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1614  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1615  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1616  {
1618  reasonIfUnsupported,
1619  input,
1620  outputs,
1621  *splitAxis.begin());
1622  }
1623 #endif
1624  IgnoreUnused(descriptor);
1625  for (auto output : outputs)
1626  {
1627  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1628  {
1629  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1630  return false;
1631  }
1632  }
1633  return true;
1634 }
1635 
1636 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1637  const TensorInfo& output,
1638  const StackDescriptor& descriptor,
1639  Optional<std::string&> reasonIfUnsupported) const
1640 {
1642  reasonIfUnsupported,
1643  inputs,
1644  output,
1645  descriptor);
1646 }
1647 
1649  const TensorInfo& output,
1650  const StridedSliceDescriptor& descriptor,
1651  Optional<std::string&> reasonIfUnsupported) const
1652 {
1654  reasonIfUnsupported,
1655  input,
1656  output,
1657  descriptor);
1658 }
1659 
1661  const TensorInfo& input1,
1662  const TensorInfo& output,
1663  Optional<std::string&> reasonIfUnsupported) const
1664 {
1666  reasonIfUnsupported,
1667  input0,
1668  input1,
1669  output,
1670  nullptr);
1671 }
1672 
1674  const TensorInfo& output,
1675  const TileDescriptor& descriptor,
1676  Optional<std::string&> reasonIfUnsupported) const
1677 {
1679  reasonIfUnsupported,
1680  input,
1681  output,
1682  descriptor);
1683 }
1684 
1686  const TensorInfo& output,
1687  const TransposeConvolution2dDescriptor& descriptor,
1688  const TensorInfo& weights,
1689  const Optional<TensorInfo>& biases,
1690  Optional<std::string&> reasonIfUnsupported) const
1691 {
1693  reasonIfUnsupported,
1694  input,
1695  output,
1696  descriptor,
1697  weights,
1698  biases);
1699 }
1700 
1702  const TensorInfo& output,
1703  const TransposeDescriptor& descriptor,
1704  Optional<std::string&> reasonIfUnsupported) const
1705 {
1706  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1707 }
1708 
1710  const TensorInfo& outputStateIn,
1711  const TensorInfo& cellStateIn,
1712  const TensorInfo& outputStateOut,
1713  const TensorInfo& cellStateOut,
1714  const TensorInfo& output,
1715  const UnidirectionalSequenceLstmDescriptor& descriptor,
1716  const LstmInputParamsInfo& paramsInfo,
1717  Optional<std::string&> reasonIfUnsupported) const
1718 {
1719  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1720  outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1721  cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1722  outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1723  cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1725  {
1727  reasonIfUnsupported,
1728  input,
1729  outputStateIn,
1730  cellStateIn,
1731  outputStateOut,
1732  cellStateOut,
1733  output,
1734  descriptor,
1735  paramsInfo);
1736  }
1737  else
1738  {
1740  reasonIfUnsupported,
1741  input,
1742  outputStateIn,
1743  cellStateIn,
1744  outputStateOut,
1745  cellStateOut,
1746  output,
1747  descriptor,
1748  paramsInfo);
1749  }
1750 }
1751 
1752 } // namespace armnn
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::NeonLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:792
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
NeonConcatWorkload.hpp
armnn::NeonMinimumWorkloadValidate
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validation function that checks the inputs and output.
Definition: NeonMinimumWorkload.cpp:15
NeonComparisonWorkload.hpp
armnn::NeonFullyConnectedWorkloadValidate
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFullyConnectedWorkload.cpp:24
armnn::NeonSpaceToBatchNdWorkloadValidate
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
Definition: NeonSpaceToBatchNdWorkload.cpp:15
armnn::NeonLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1079
NeonConvertFp16ToFp32Workload.hpp
armnn::OriginsDescriptor::GetConcatAxis
unsigned int GetConcatAxis() const
Get the concatenation axis value.
Definition: Descriptors.cpp:162
armnn::BinaryOperation::Mul
@ Mul
armnn::NeonReverseV2WorkloadValidate
arm_compute::Status NeonReverseV2WorkloadValidate(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output)
Definition: NeonReverseV2Workload.cpp:14
NeonAbsWorkload.hpp
NeonNegWorkload.hpp
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::NeonTileWorkloadValidate
arm_compute::Status NeonTileWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor)
Definition: NeonTileWorkload.cpp:14
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::NeonConvertFp32ToFp16WorkloadValidate
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp32ToFp16Workload.cpp:21
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
armnn::NeonAdditionWorkloadValidate
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonAdditionWorkload.cpp:20
NeonSoftmaxWorkload.hpp
armnn::NeonLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1579
NeonExpWorkload.hpp
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
armnn::Optional
Definition: Optional.hpp:270
armnn::NeonAbsWorkloadValidate
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonAbsWorkload.cpp:17
armnn::NeonMultiplicationWorkloadValidate
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonMultiplicationWorkload.cpp:19
NeonStridedSliceWorkload.hpp
armnn::NeonLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1416
NeonNormalizationFloatWorkload.hpp
armnn::NeonBackendModelContext::IsFastMathEnabled
bool IsFastMathEnabled() const
Definition: NeonBackendModelContext.cpp:53
armnn::NeonStackWorkloadValidate
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
Definition: NeonStackWorkload.cpp:27
NeonReverseV2Workload.hpp
armnn::NeonPooling3dWorkloadValidate
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
Definition: NeonPooling3dWorkload.cpp:15
armnn::NeonLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1636
NeonFusedWorkload.hpp
NeonAdditionWorkload.hpp
NeonMeanWorkload.hpp
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
WorkloadUtils.hpp
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::NeonNegWorkloadValidate
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonNegWorkload.cpp:17
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::NeonQLstmWorkloadValidate
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonQLstmWorkload.cpp:243
armnn::NeonGatherWorkloadValidate
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
Definition: NeonGatherWorkload.cpp:13
armnn::NeonConvolution3dWorkloadValidate
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution3dWorkload.cpp:24
armnn::NeonSubtractionWorkloadValidate
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonSubtractionWorkload.cpp:22
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::NeonInstanceNormalizationWorkloadValidate
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
Definition: NeonInstanceNormalizationWorkload.cpp:19
NeonUnidirectionalSequenceLstmFloatWorkload.hpp
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::NeonBackendModelContext
The NeonBackendModelContext is used to pass in Neon specific backend ModelOptions.
Definition: NeonBackendModelContext.hpp:19
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::NeonMeanWorkloadValidate
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
Definition: NeonMeanWorkload.cpp:18
armnn::NeonLayerSupport
Definition: NeonLayerSupport.hpp:14
armnn::NeonLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:817
armnn::NeonLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1432
armnn::NeonLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:974
armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmFloatWorkload.cpp:510
NeonDivisionWorkload.hpp
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::NeonLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1440
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnn::NeonMaximumWorkloadValidate
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonMaximumWorkload.cpp:14
NeonPermuteWorkload.hpp
armnn::OriginsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:192
armnn::BinaryOperation::Sub
@ Sub
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
NeonBatchNormalizationWorkload.hpp
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
armnn::DataType::Float32
@ Float32
NeonPadWorkload.hpp
armnn::NeonLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1360
NeonQuantizeWorkload.hpp
NeonConvolution3dWorkload.hpp
armnn::LogicalBinaryOperation::LogicalOr
@ LogicalOr
NeonChannelShuffleWorkload.hpp
armnn::NeonResizeWorkloadValidate
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
Definition: NeonResizeWorkload.cpp:22
armnn::LayerType::Tile
@ Tile
armnn::NeonExpWorkloadValidate
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonExpWorkload.cpp:17
NeonLogicalAndWorkload.hpp
NeonConvolution2dWorkload.hpp
NeonPooling2dWorkload.hpp
armnn::NeonLayerSupport::IsFusedSupported
bool IsFusedSupported(const std::vector< std::reference_wrapper< TensorInfo >> &inputs, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const FusedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1202
armnn::NeonPermuteWorkloadValidate
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
Definition: NeonPermuteWorkload.cpp:15
armnn::LayerType::Stack
@ Stack
armnn::NeonLogicalNotWorkloadValidate
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogicalNotWorkload.cpp:19
BackendRegistry.hpp
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
NeonFloorDivWorkload.hpp
armnn::NeonLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:956
IgnoreUnused.hpp
armnn::LayerType::Normalization
@ Normalization
armnn::NeonLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1267
armnn::NeonBatchMatMulValidate
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchMatMulWorkload.cpp:19
armnn::NeonLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:868
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::UnaryOperation::Neg
@ Neg
armnn::LayerType::Reduce
@ Reduce
NeonSpaceToDepthWorkload.hpp
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::DataType::QSymmS16
@ QSymmS16
armnn::NeonConvertFp16ToFp32WorkloadValidate
arm_compute::Status NeonConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp16ToFp32Workload.cpp:19
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
NumericCast.hpp
NeonReshapeWorkload.hpp
armnn::NeonLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1523
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::NeonLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:847
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::NeonLogWorkloadValidate
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonLogWorkload.cpp:17
armnn::NeonArgMinMaxWorkloadValidate
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
Definition: NeonArgMinMaxWorkload.cpp:31
armnn::NeonLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1229
NeonArgMinMaxWorkload.hpp
armnn::NeonLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1571
armnn::NeonLogSoftmaxWorkloadValidate
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
Definition: NeonLogSoftmaxWorkload.cpp:19
armnn::NeonNormalizationWorkloadValidate
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
Definition: NeonNormalizationFloatWorkload.cpp:49
LayerSupportCommon.hpp
armnn::NeonLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1591
armnn::NeonLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported) const override
Default implementation of the ILayerSupport interface, Backends should implement this as a switch sta...
Definition: NeonLayerSupport.cpp:731
armnn::NeonConstantWorkloadValidate
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
Definition: NeonConstantWorkload.cpp:20
armnn::LayerType::Slice
@ Slice
NeonLogSoftmaxWorkload.hpp
armnn::NeonReduceWorkloadValidate
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
Definition: NeonReduceWorkload.cpp:19
armnn::BinaryOperation::Maximum
@ Maximum
armnn::FusedDescriptor
A FusedDescriptor for the FusedLayer.
Definition: Descriptors.hpp:944
armnn::NeonLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1535
armnn::NeonPadWorkloadValidate
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
Definition: NeonPadWorkload.cpp:59
NeonGatherWorkload.hpp
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::NeonGatherNdWorkloadValidate
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
Definition: NeonGatherNdWorkload.cpp:14
armnn::NeonConcatWorkloadValidate
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
Definition: NeonConcatWorkload.cpp:27
armnn::BinaryOperation::SqDiff
@ SqDiff
armnn::NeonLstmFloatWorkloadValidate
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonLstmFloatWorkload.cpp:253
armnn::NeonLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1511
armnn::UnaryOperation::Rsqrt
@ Rsqrt
armnn::NeonDivisionWorkloadValidate
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
Definition: NeonDivisionWorkload.cpp:18
armnn::NeonLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1386
NeonPooling3dWorkload.hpp
armnn::NeonLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1559
armnn::UnaryOperation::Sqrt
@ Sqrt
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::NeonSqrtWorkloadValidate
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSqrtWorkload.cpp:18
armnn::NeonDepthToSpaceWorkloadValidate
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
Definition: NeonDepthToSpaceWorkload.cpp:19
armnn::NeonLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1660
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LogicalBinaryDescriptor::m_Operation
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
Definition: Descriptors.hpp:1534
NeonReduceWorkload.hpp
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::NeonLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input0, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1673
armnn::UnaryOperation::Exp
@ Exp
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
armnn::LayerSupportBase::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:85
NeonLogicalOrWorkload.hpp
armnn::NeonLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1483
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Softmax
@ Softmax
armnn::NeonTransposeWorkloadValidate
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
Definition: NeonTransposeWorkload.cpp:15
armnn::NeonSoftmaxWorkloadValidate
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: NeonSoftmaxWorkload.cpp:19
armnn::NeonLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1398
armnn::PolymorphicDowncast
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
Definition: PolymorphicDowncast.hpp:74
NeonDepthToSpaceWorkload.hpp
armnn::NeonSpaceToDepthWorkloadValidate
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
Definition: NeonSpaceToDepthWorkload.cpp:19
armnn::TensorInfo::IsTypeSpaceMatch
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
Definition: Tensor.cpp:432
armnn::NeonLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1348
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
NeonDepthwiseConvolutionWorkload.hpp
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::UnaryOperation::Sin
@ Sin
armnn::LayerType::Quantize
@ Quantize
armnn::NeonLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1404
NeonTransposeWorkload.hpp
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
armnn::NeonLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1050
armnn::NeonLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1701
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
armnn::LayerType::Addition
@ Addition
armnn::NeonPreluWorkloadValidate
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
Definition: NeonPreluWorkload.cpp:17
NeonBatchToSpaceNdWorkload.hpp
armnn::NeonDequantizeWorkloadValidate
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonDequantizeWorkload.cpp:22
ArmComputeUtils.hpp
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
armnn::NeonLayerSupport::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1493
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
NeonQLstmWorkload.hpp
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
NeonRsqrtWorkload.hpp
armnn::NeonLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:917
armnn::NeonLogicalAndWorkloadValidate
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalAndWorkload.cpp:18
NeonSubtractionWorkload.hpp
NeonCastWorkload.hpp
armnn::NeonLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input, const TensorInfo &axis, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1547
armnn::NeonDepthwiseConvolutionWorkloadValidate
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
Definition: NeonDepthwiseConvolutionWorkload.cpp:29
armnn::BoostLogSeverityMapping::info
@ info
armnn::BinaryOperation::Power
@ Power
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::NeonLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1185
armnn::NeonLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1106
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::NeonLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1062
armnn::LayerType::Division
@ Division
armnn::NeonQuantizedLstmWorkloadValidate
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
Definition: NeonQuantizedLstmWorkload.cpp:131
armnn::NeonActivationWorkloadValidate
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
Definition: NeonActivationWorkload.cpp:17
armnn::LayerType::Shape
@ Shape
armnn::NeonLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1158
NeonLogWorkload.hpp
NeonResizeWorkload.hpp
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::NeonQuantizeWorkloadValidate
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonQuantizeWorkload.cpp:18
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
NeonTransposeConvolution2dWorkload.hpp
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
NeonFullyConnectedWorkload.hpp
NeonSinWorkload.hpp
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::NeonBatchToSpaceNdWorkloadValidate
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
Definition: NeonBatchToSpaceNdWorkload.cpp:15
armnn::NeonCastValidate
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonCastWorkload.cpp:19
armnn::NeonLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:890
armnn::UnaryOperation::Log
@ Log
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LogicalBinaryOperation::LogicalAnd
@ LogicalAnd
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::LayerSupportBase::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:131
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
NeonInstanceNormalizationWorkload.hpp
FORWARD_WORKLOAD_VALIDATE_FUNC
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: NeonLayerSupport.cpp:167
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::NeonLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1259
Tensor.hpp
armnn::Status
Status
Definition: Types.hpp:42
armnn::NeonLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:805
armnn::BinaryOperation::FloorDiv
@ FloorDiv
armnn::NeonLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1247
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::LayerType::Reshape
@ Reshape
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::NeonLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1685
armnn::NeonSplitterWorkloadValidate
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
Definition: NeonSplitterWorkload.cpp:33
NeonSpaceToBatchNdWorkload.hpp
NeonConstantWorkload.hpp
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::NeonFusedWorkloadValidate
arm_compute::Status NeonFusedWorkloadValidate(const std::vector< std::reference_wrapper< TensorInfo >> &inputInfos, const std::vector< std::reference_wrapper< TensorInfo >> &outputInfos, const FusedDescriptor &fusedDescriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonFusedWorkload.cpp:22
armnn::LayerType::Fill
@ Fill
armnn::NeonRsqrtWorkloadValidate
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonRsqrtWorkload.cpp:18
armnn::NeonUnidirectionalSequenceLstmWorkloadValidate
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
Definition: NeonUnidirectionalSequenceLstmWorkload.cpp:491
armnn::NeonReshapeWorkloadValidate
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonReshapeWorkload.cpp:17
armnn::LayerType::L2Normalization
@ L2Normalization
NeonLstmFloatWorkload.hpp
armnn::LayerType::Fused
@ Fused
armnn::NeonConvolution2dWorkloadValidate
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
Definition: NeonConvolution2dWorkload.cpp:24
NeonMaximumWorkload.hpp
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::NeonBatchNormalizationValidate
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonBatchNormalizationWorkload.cpp:24
armnn::NeonLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:880
NeonQuantizedLstmWorkload.hpp
armnn::ViewsDescriptor::GetNumDimensions
uint32_t GetNumDimensions() const
Get the number of dimensions.
Definition: Descriptors.cpp:307
armnn::LayerType::Minimum
@ Minimum
armnn::NeonSinWorkloadValidate
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonSinWorkload.cpp:17
armnn::NeonLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1170
armnn::NeonFloorDivWorkloadValidate
arm_compute::Status NeonFloorDivWorkloadValidate(const TensorInfo &input0Info, const TensorInfo &input1Info, const TensorInfo &outputInfo, const ActivationDescriptor *activationDescriptor)
Validation for the Floor Div Workload.
Definition: NeonFloorDivWorkload.cpp:52
armnn::NeonLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1017
NeonBatchMatMulWorkload.hpp
NeonMinimumWorkload.hpp
armnn::NeonLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1241
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::NeonLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1336
armnn::NeonLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1215
NeonLogicalNotWorkload.hpp
armnn::BinaryOperation::Minimum
@ Minimum
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::NeonStridedSliceWorkloadValidate
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
Definition: NeonStridedSliceWorkload.cpp:19
armnn::LayerType::MemCopy
@ MemCopy
Exceptions.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
armnn::NeonLayerSupport::NeonLayerSupport
NeonLayerSupport()
Definition: NeonLayerSupport.cpp:180
armnn::ComputeSplitAxis
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.
Definition: WorkloadUtils.cpp:377
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
NeonSqrtWorkload.hpp
armnn::LayerType::Rank
@ Rank
armnn::LayerType::Mean
@ Mean
ArmComputeTensorUtils.hpp
armnn::UnaryOperation::Abs
@ Abs
armnn::NeonLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:964
NeonStackWorkload.hpp
NeonBackendModelContext.hpp
armnn::NeonSliceWorkloadValidate
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
Definition: NeonSliceWorkload.cpp:21
NeonPreluWorkload.hpp
armnn::NeonLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1373
armnn::LayerType::Input
@ Input
armnn::NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reason=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1089
armnn::LayerType::Resize
@ Resize
NeonElementwiseBinaryWorkload.hpp
NeonSliceWorkload.hpp
armnn::NeonLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1324
NeonGatherNdWorkload.hpp
NeonTileWorkload.hpp
armnn::NeonElementwiseBinaryWorkloadValidate
arm_compute::Status NeonElementwiseBinaryWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: NeonElementwiseBinaryWorkload.cpp:20
armnn::NeonPooling2dWorkloadValidate
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
Definition: NeonPooling2dWorkload.cpp:22
armnn::IsLayerTypeSupported
bool IsLayerTypeSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string & > reasonIfUnsupported, const NeonLayerSupport &support)
Definition: NeonLayerSupport.cpp:185
armnn::SetValueChecked
void SetValueChecked(Optional< T & > optionalRef, V &&val)
Definition: LayerSupportCommon.hpp:17
armnn::NeonLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:902
armnn::NeonChannelShuffleValidate
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
Definition: NeonChannelShuffleWorkload.cpp:17
armnn::NeonL2NormalizationWorkloadValidate
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
Definition: NeonL2NormalizationFloatWorkload.cpp:19
armnn::NeonComparisonWorkloadValidate
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
Definition: NeonComparisonWorkload.cpp:16
armnn::BinaryOperation::Div
@ Div
NeonMultiplicationWorkload.hpp
armnn::LayerType::Convolution2d
@ Convolution2d
NeonUnidirectionalSequenceLstmWorkload.hpp
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
NeonL2NormalizationFloatWorkload.hpp
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::LayerType::Lstm
@ Lstm
armnn::LayerSupportBase::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:98
armnn::LayerType::Dequantize
@ Dequantize
armnn::NeonLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported) const
Definition: NeonLayerSupport.cpp:1709
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::NeonLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1448
NeonConvertFp32ToFp16Workload.hpp
armnn::NeonLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:779
armnn::NeonLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1603
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:496
armnn::LayerType::Unmap
@ Unmap
armnn::NeonLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1292
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::LayerType::QLstm
@ QLstm
NeonDequantizeWorkload.hpp
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
NeonSplitterWorkload.hpp
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::NeonLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1648
armnn::NeonLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1300
armnn::IBackendInternal::IBackendSpecificModelContextPtr
std::shared_ptr< IBackendModelContext > IBackendSpecificModelContextPtr
Definition: IBackendInternal.hpp:96
NeonLayerSupport.hpp
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
NeonActivationWorkload.hpp
armnn::NeonLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:1424
armnn::NeonTransposeConvolution2dWorkloadValidate
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: NeonTransposeConvolution2dWorkload.cpp:25
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::NeonLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: NeonLayerSupport.cpp:984
armnn::NeonLogicalOrWorkloadValidate
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: NeonLogicalOrWorkload.cpp:18