ArmNN 24.08 — RefLayerSupport.cpp (Doxygen source listing; follow the link on the documentation page for this file to see the annotated API reference).
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "RefLayerSupport.hpp"
7 
8 #include <armnn/TypesUtils.hpp>
9 #include <armnn/Types.hpp>
13 
14 #include <LayerSupportCommon.hpp>
16 
17 #include <array>
18 #include <vector>
19 
20 namespace armnn
21 {
22 
23 namespace
24 {
25 
26 template<typename Float32Func, typename Uint8Func, typename ... Params>
27 bool IsSupportedForDataTypeRef(Optional<std::string&> reasonIfUnsupported,
28  DataType dataType,
29  Float32Func floatFuncPtr,
30  Uint8Func uint8FuncPtr,
31  Params&&... params)
32 {
33  return IsSupportedForDataTypeGeneric(reasonIfUnsupported,
34  dataType,
35  &FalseFunc<Params...>,
36  floatFuncPtr,
37  uint8FuncPtr,
38  &FalseFunc<Params...>,
39  &FalseFunc<Params...>,
40  std::forward<Params>(params)...);
41 }
42 
43 } // anonymous namespace
44 
45 namespace
46 {
47 
/// Builds the standard "wrong number of dimensions" validation message used by
/// the reference backend's layer-support checks.
///
/// @param expected   Number of dimensions the layer requires.
/// @param actual     Number of dimensions the tensor actually has.
/// @param layerStr   Human-readable layer name, inserted after "Reference ".
/// @param tensorName Name of the offending tensor (quoted in the message).
/// @return The fully formatted error message.
///
/// Fix: the string parameters are now taken by const reference — the previous
/// mutable `std::string&` blocked const strings and temporaries at the call
/// site and wrongly implied the function mutates its arguments.
std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
                                              unsigned int actual,
                                              const std::string& layerStr,
                                              const std::string& tensorName)
{
    // Note: message text is byte-identical to the original wording.
    return "Reference " + layerStr + ": Expected " + std::to_string(expected) +
           " dimensions but got " + std::to_string(actual) +
           " dimensions instead, for the '" + tensorName + "' tensor.";
}
58 
59 } // anonymous namespace
60 
62  const std::vector<TensorInfo>& infos,
63  const BaseDescriptor& descriptor,
64  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
65  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
66  Optional<std::string&> reasonIfUnsupported) const
67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
74  reasonIfUnsupported);
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
81  reasonIfUnsupported);
83  return IsBatchMatMulSupported(infos[0],
84  infos[1],
85  infos[2],
86  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
87  reasonIfUnsupported);
89  return IsBatchNormalizationSupported(infos[0],
90  infos[1],
91  infos[2],
92  infos[3],
93  infos[4],
94  infos[5],
95  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
96  (&descriptor)),
97  reasonIfUnsupported);
99  return IsBatchToSpaceNdSupported(infos[0],
100  infos[1],
101  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
102  reasonIfUnsupported);
104  return IsBroadcastToSupported(infos[0],
105  infos[1],
106  *(PolymorphicDowncast<const BroadcastToDescriptor*>(&descriptor)),
107  reasonIfUnsupported);
109  return IsComparisonSupported(infos[0],
110  infos[1],
111  infos[2],
112  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
113  reasonIfUnsupported);
114  case LayerType::Concat:
115  {
116  std::vector<const TensorInfo*> inputInfos;
117  for (uint32_t i = 0; i < (infos.size() - 1); i++)
118  {
119  inputInfos.push_back(&infos[i]);
120  }
121  return IsConcatSupported(inputInfos,
122  infos[infos.size() - 1],
123  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
124  reasonIfUnsupported);
125  }
126  case LayerType::Constant:
127  return IsConstantSupported(infos[0], reasonIfUnsupported);
129  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
131  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
133  {
134  if (infos.size() != 4)
135  {
136  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
137  "TensorInfos should be of format: {input, output, weights, biases}.");
138  }
139 
140  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
141  if (infos[3] == TensorInfo())
142  {
143  return IsConvolution2dSupported(infos[0],
144  infos[1],
145  desc,
146  infos[2],
147  EmptyOptional(),
148  reasonIfUnsupported);
149  }
150  else
151  {
152  return IsConvolution2dSupported(infos[0],
153  infos[1],
154  desc,
155  infos[2],
156  infos[3],
157  reasonIfUnsupported);
158  }
159  }
161  return IsDepthToSpaceSupported(infos[0],
162  infos[1],
163  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
164  reasonIfUnsupported);
166  {
167  if (infos.size() != 4)
168  {
169  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
170  "TensorInfos should be of format: {input, output, weights, biases}.");
171  }
172 
173  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
174  if (infos[3] == TensorInfo())
175  {
176  return IsDepthwiseConvolutionSupported(infos[0],
177  infos[1],
178  desc,
179  infos[2],
180  EmptyOptional(),
181  reasonIfUnsupported);
182  }
183  else
184  {
185  return IsDepthwiseConvolutionSupported(infos[0],
186  infos[1],
187  desc,
188  infos[2],
189  infos[3],
190  reasonIfUnsupported);
191  }
192  }
194  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
195  case LayerType::Division:
196  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
198  {
199  std::array<DataType, 7> supportedTypes =
200  {
207  };
208 
209  bool supported = true;
210  supported &= CheckSupportRule(TypeAnyOf(infos[0], supportedTypes), reasonIfUnsupported,
211  "Reference elementwise unary: input type not supported");
212 
213  supported &= CheckSupportRule(TypeAnyOf(infos[1], supportedTypes), reasonIfUnsupported,
214  "Reference elementwise unary: input type not supported");
215 
216  supported &= CheckSupportRule(TypeAnyOf(infos[2], supportedTypes), reasonIfUnsupported,
217  "Reference elementwise unary: output type not supported");
218 
219  supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[1]), reasonIfUnsupported,
220  "Reference elementwise unary: input types not matching");
221 
222  supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[2]), reasonIfUnsupported,
223  "Reference elementwise unary: input and output types not matching");
224 
225  return supported;
226  }
228  return IsElementwiseUnarySupported(infos[0],
229  infos[1],
230  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
231  reasonIfUnsupported);
232  case LayerType::Fill:
233  return IsFillSupported(infos[0],
234  infos[1],
235  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
236  reasonIfUnsupported);
237  case LayerType::Floor:
238  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
240  return IsFullyConnectedSupported(infos[0],
241  infos[1],
242  infos[2],
243  infos[3],
244  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
245  reasonIfUnsupported);
246  case LayerType::Gather:
247  return IsGatherSupported(infos[0],
248  infos[1],
249  infos[2],
250  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
251  reasonIfUnsupported);
252  case LayerType::GatherNd:
253  return IsGatherNdSupported(infos[0],
254  infos[1],
255  infos[2],
256  reasonIfUnsupported);
257  case LayerType::Input:
258  return IsInputSupported(infos[0], reasonIfUnsupported);
260  return IsInstanceNormalizationSupported(infos[0],
261  infos[1],
262  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
263  (&descriptor)),
264  reasonIfUnsupported);
266  return IsL2NormalizationSupported(infos[0],
267  infos[1],
268  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
269  reasonIfUnsupported);
271  return IsLogicalBinarySupported(infos[0],
272  infos[1],
273  infos[2],
274  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
275  reasonIfUnsupported);
277  return IsLogSoftmaxSupported(infos[0],
278  infos[1],
279  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
280  reasonIfUnsupported);
281  case LayerType::Lstm:
282  return IsLstmSupported(infos[0],
283  infos[1],
284  infos[2],
285  infos[3],
286  infos[4],
287  infos[5],
288  infos[6],
289  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
290  lstmParamsInfo.value(),
291  reasonIfUnsupported);
292  case LayerType::QLstm:
293  return IsQLstmSupported(infos[0],
294  infos[1],
295  infos[2],
296  infos[3],
297  infos[4],
298  infos[5],
299  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
300  lstmParamsInfo.value(),
301  reasonIfUnsupported);
302  case LayerType::Maximum:
303  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
304  case LayerType::Mean:
305  return IsMeanSupported(infos[0],
306  infos[1],
307  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
308  reasonIfUnsupported);
309  case LayerType::Minimum:
310  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
312  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
314  return IsNormalizationSupported(infos[0],
315  infos[1],
316  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
317  reasonIfUnsupported);
318  case LayerType::Output:
319  return IsOutputSupported(infos[0], reasonIfUnsupported);
320  case LayerType::Pad:
321  return IsPadSupported(infos[0],
322  infos[1],
323  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
324  reasonIfUnsupported);
325  case LayerType::Permute:
326  return IsPermuteSupported(infos[0],
327  infos[1],
328  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
329  reasonIfUnsupported);
331  return IsPooling2dSupported(infos[0],
332  infos[1],
333  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
334  reasonIfUnsupported);
335  case LayerType::Prelu:
336  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
337  case LayerType::Quantize:
338  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
339  case LayerType::Reshape:
340  return IsReshapeSupported(infos[0],
341  infos[1],
342  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
343  reasonIfUnsupported);
344  case LayerType::Resize:
345  return IsResizeSupported(infos[0],
346  infos[1],
347  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
348  reasonIfUnsupported);
350  return IsReverseV2Supported(infos[0],
351  infos[1],
352  infos[2],
353  reasonIfUnsupported);
354  case LayerType::Reduce:
355  return IsReduceSupported(infos[0],
356  infos[1],
357  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
358  reasonIfUnsupported);
360  return IsScatterNdSupported(infos[0],
361  infos[1],
362  infos[2],
363  infos[3],
364  *(PolymorphicDowncast<const ScatterNdDescriptor*>(&descriptor)),
365  reasonIfUnsupported);
366  case LayerType::Slice:
367  return IsSliceSupported(infos[0],
368  infos[1],
369  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
370  reasonIfUnsupported);
371  case LayerType::Softmax:
372  return IsSoftmaxSupported(infos[0],
373  infos[1],
374  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
375  reasonIfUnsupported);
377  return IsSpaceToBatchNdSupported(infos[0],
378  infos[1],
379  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
380  reasonIfUnsupported);
382  return IsSpaceToDepthSupported(infos[0],
383  infos[1],
384  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
385  reasonIfUnsupported);
386  case LayerType::Splitter:
387  {
388  std::vector<TensorInfo> outputInfos;
389  for (uint32_t i = 1; i < infos.size(); i++)
390  {
391  outputInfos.push_back(infos[i]);
392  }
393  return IsSplitterSupported(infos[0],
394  {outputInfos.begin(), outputInfos.end()},
395  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
396  reasonIfUnsupported);
397  }
398  case LayerType::Stack:
399  {
400  std::vector<const TensorInfo*> inputInfos;
401  for (uint32_t i = 0; i < infos.size() - 1; i++)
402  {
403  inputInfos.push_back(&infos[i]);
404  }
405  return IsStackSupported(inputInfos,
406  infos[infos.size() - 1],
407  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
408  reasonIfUnsupported);
409  }
411  return IsStridedSliceSupported(infos[0],
412  infos[1],
413  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
414  reasonIfUnsupported);
416  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
417  case LayerType::Tile:
418  return IsTileSupported(infos[0],
419  infos[1],
420  *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
421  reasonIfUnsupported);
423  return IsTransposeSupported(infos[0],
424  infos[1],
425  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
426  reasonIfUnsupported);
428  {
429  if (infos.size() != 4)
430  {
431  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
432  "TensorInfos should be of format: {input, output, weights, biases}.");
433  }
434 
435  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
436  if (infos[3] == TensorInfo())
437  {
438  return IsTransposeConvolution2dSupported(infos[0],
439  infos[1],
440  desc,
441  infos[2],
442  EmptyOptional(),
443  reasonIfUnsupported);
444  }
445  else
446  {
447  return IsTransposeConvolution2dSupported(infos[0],
448  infos[1],
449  desc,
450  infos[2],
451  infos[3],
452  reasonIfUnsupported);
453  }
454  }
455  case LayerType::Cast:
456  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
458  return IsChannelShuffleSupported(infos[0],
459  infos[1],
460  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
461  reasonIfUnsupported);
463  {
464  if (infos.size() != 4)
465  {
466  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
467  "TensorInfos should be of format: {input, output, weights, biases}.");
468  }
469 
470  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
471  if (infos[3] == TensorInfo())
472  {
473  return IsConvolution3dSupported(infos[0],
474  infos[1],
475  desc,
476  infos[2],
477  EmptyOptional(),
478  reasonIfUnsupported);
479  }
480  else
481  {
482  return IsConvolution3dSupported(infos[0],
483  infos[1],
484  desc,
485  infos[2],
486  infos[3],
487  reasonIfUnsupported);
488  }
489  }
490  case LayerType::Debug:
491  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
493  return IsDetectionPostProcessSupported(infos[0],
494  infos[1],
495  infos[2],
496  infos[3],
497  infos[4],
498  infos[5],
499  infos[6],
500  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
501  (&descriptor)),
502  reasonIfUnsupported);
504  return IsFakeQuantizationSupported(infos[0],
505  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
506  reasonIfUnsupported);
507  case LayerType::MemCopy:
508  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
509  case LayerType::Rank:
510  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
511  case LayerType::Shape:
512  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
514  {
515  if (infos.size() != 6)
516  {
517  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
518  "should be of format: {input, outputStateIn, cellStateIn, "
519  "hiddenStateOutputVal, cellStateOutputVal, output}");
520  }
521  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
523  infos[1],
524  infos[2],
525  infos[3],
526  infos[4],
527  infos[5],
528  desc,
529  lstmParamsInfo.value(),
530  reasonIfUnsupported);
531  }
533  return IsPooling3dSupported(infos[0],
534  infos[1],
535  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
536  reasonIfUnsupported);
537  case LayerType::Map:
538  return true;
539  case LayerType::Unmap:
540  return true;
542  return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
543  case LayerType::Merge:
544  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
547  infos[1],
548  infos[2],
549  infos[3],
550  infos[4],
551  quantizedLstmInputParamsInfo.value(),
552  reasonIfUnsupported);
553  default:
554  // layers not supported in reference by default:
555  // precompiled, standin, switch, fused
556  return false;
557  }
558 }
559 
561  const TensorInfo& output,
562  const ActivationDescriptor& descriptor,
563  Optional<std::string&> reasonIfUnsupported) const
564 {
565  bool supported = true;
566 
567  // Define supported types.
568  std::array<DataType,6> supportedTypes = {
574  };
575 
576  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
577  "Reference activation: input type not supported.");
578 
579  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
580  "Reference activation: output type not supported.");
581 
582  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
583  "Reference activation: input and output types mismatched.");
584 
585  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
586  "Reference activation: input and output shapes are of different rank.");
587 
588 
589  struct ActivationFunctionSupported : public Rule
590  {
591  ActivationFunctionSupported(const ActivationDescriptor& desc)
592  {
593  switch(desc.m_Function)
594  {
608  {
609  m_Res = true;
610  break;
611  }
612  default:
613  {
614  m_Res = false;
615  break;
616  }
617  }
618  }
619  };
620 
621  // Function is supported
622  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
623  "Reference activation: function not supported.");
624 
625  return supported;
626 }
627 
629  const TensorInfo& input1,
630  const TensorInfo& output,
631  Optional<std::string&> reasonIfUnsupported) const
632 {
633  bool supported = true;
634 
635  std::array<DataType,7> supportedTypes = {
642  };
643 
644  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
645  "Reference addition: input 0 is not a supported type.");
646 
647  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
648  "Reference addition: input 1 is not a supported type.");
649 
650  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
651  "Reference addition: output is not a supported type.");
652 
653  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
654  "Reference addition: input 0 and Input 1 types are mismatched");
655 
656  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
657  "Reference addition: input and output types are mismatched");
658 
659  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
660  "Reference addition: shapes are not suitable for implicit broadcast.");
661 
662  return supported;
663 }
664 
666  const armnn::ArgMinMaxDescriptor &descriptor,
667  armnn::Optional<std::string &> reasonIfUnsupported) const
668 {
669  IgnoreUnused(descriptor);
670 
671  std::array<DataType, 8> supportedInputTypes =
672  {
680  };
681 
682  std::array<DataType,2> supportedOutputTypes = {
685  };
686 
687  bool supported = true;
688 
689  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
690  "Reference ArgMinMax: input is not a supported type.");
691  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
692  "Reference ArgMinMax: output type not supported");
693 
694  return supported;
695 }
696 
698  const TensorInfo& inputY,
699  const TensorInfo& output,
700  const BatchMatMulDescriptor& descriptor,
701  Optional<std::string &> reasonIfUnsupported) const
702 {
703  IgnoreUnused(descriptor);
704 
705  std::array<DataType, 6> supportedTypes =
706  {
712  };
713 
714  bool supported = true;
715 
716  supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
717  "Reference batch matrix multiplication: input X is not a supported type");
718 
719  supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
720  "Reference batch matrix multiplication: input Y is not a supported type");
721 
722  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
723  "Reference batch matrix multiplication: output is not a supported type");
724 
725  supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
726  "Reference batch matrix multiplication: input X and input Y types are mismatched");
727 
728  supported &= CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
729  "Reference batch matrix multiplication: inputs and output types are mismatched");
730 
732  reasonIfUnsupported,
733  "Reference batch matrix multiplication: input X is not of rank 2 or greater");
734 
736  reasonIfUnsupported,
737  "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
738 
739  return supported;
740 }
741 
743  const TensorInfo& output,
744  const TensorInfo& mean,
745  const TensorInfo& variance,
746  const TensorInfo& beta,
747  const TensorInfo& gamma,
748  const BatchNormalizationDescriptor& descriptor,
749  Optional<std::string&> reasonIfUnsupported) const
750 {
751  IgnoreUnused(descriptor);
752 
753  std::array<DataType, 6> supportedTypes =
754  {
760  };
761 
762  bool supported = true;
763 
764  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
765  "Reference batch normalization: input is not a supported type.");
766 
767  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
768  "Reference batch normalization: output is not a supported type.");
769 
770  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
771  "Reference batch normalization: input and output types are mismatched");
772 
773  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
774  "Reference batch normalization: mean is not a supported type.");
775 
776  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
777  "Reference batch normalization: variance is not a supported type.");
778 
779  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
780  "Reference batch normalization: beta is not a supported type.");
781 
782  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
783  "Reference batch normalization: gamma is not a supported type.");
784 
785  return supported;
786 }
787 
789  const TensorInfo& output,
790  const BatchToSpaceNdDescriptor& descriptor,
791  Optional<std::string&> reasonIfUnsupported) const
792 {
793  IgnoreUnused(descriptor);
794 
795  bool supported = true;
796 
797  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
798  std::string inputTensorStr = "input";
799  std::string outputTensorStr = "output";
800 
801  // Define supported types.
802  std::array<DataType,6> supportedTypes =
803  {
809  };
810 
811  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
812  "Reference BatchToSpaceNd: input type not supported.");
813 
814  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
815  "Reference BatchToSpaceNd: output type not supported.");
816 
817  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
818  "Reference BatchToSpaceNd: input and output types mismatched.");
819 
820  return supported;
821 }
822 
824  const TensorInfo& output,
825  const BroadcastToDescriptor& descriptor,
826  Optional<std::string&> reasonIfUnsupported) const
827 {
828  IgnoreUnused(descriptor);
829 
830  bool supported = true;
831 
832  std::array<DataType, 8> supportedTypes
833  {
842  };
843 
844  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
845  "BroadcastTo: input type not supported.");
846 
847  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
848  "BroadcastTo: output type not supported");
849 
850  return supported;
851 }
852 
854  const TensorInfo& output,
855  Optional<std::string&> reasonIfUnsupported) const
856 {
857  std::array<DataType, 10> supportedInputTypes =
858  {
867  };
868 
869  bool supported = true;
870  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
871  "Reference cast: input is not a supported type");
872 
873 
874  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
875  "Reference cast: output is not a supported type");
876 
877  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
878  "Reference cast: input and output shapes have different number of total elements");
879 
880  return supported;
881 }
882 
884  const TensorInfo& output,
885  const ChannelShuffleDescriptor& descriptor,
886  Optional<std::string&> reasonIfUnsupported) const
887 {
888  IgnoreUnused(descriptor);
889  bool supported = true;
890 
891  // Define supported output and inputs types.
892  std::array<DataType, 7> supportedTypes =
893  {
900  };
901 
902  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
903  "Reference ChannelShuffle: input is not a supported type.");
904 
905  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
906  "Reference ChannelShuffle: output is not a supported type.");
907 
908  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
909  "Reference ChannelShuffle: input and output types are mismatched.");
910 
911  return supported;
912 }
913 
914 
916  const TensorInfo& input1,
917  const TensorInfo& output,
918  const ComparisonDescriptor& descriptor,
919  Optional<std::string&> reasonIfUnsupported) const
920 {
921  IgnoreUnused(descriptor);
922  std::array<DataType, 8> supportedInputTypes =
923  {
931  };
932 
933  bool supported = true;
934  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
935  "Reference comparison: input 0 is not a supported type");
936 
937  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
938  "Reference comparison: input 0 and Input 1 types are mismatched");
939 
940  supported &= CheckSupportRule(TypeIs(output, DataType::Boolean), reasonIfUnsupported,
941  "Reference comparison: output is not of type Boolean");
942 
943  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
944  "Reference comparison: shapes are not suitable for implicit broadcast.");
945 
946  return supported;
947 }
948 
// Checks whether the reference backend can execute a Concat layer: every input
// and the output must be one of the supported data types, and each input's
// type must match the output's type.
// NOTE(review): this listing came from a Doxygen export — the seven
// DataType entries of `supportedTypes` (original lines 959-964) were rendered
// as hyperlinks and lost in extraction; restore them from the repository
// source before compiling.
949 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
950  const TensorInfo& output,
951  const OriginsDescriptor& descriptor,
952  Optional<std::string&> reasonIfUnsupported) const
953 {
// The origins descriptor does not affect type-based support.
954  IgnoreUnused(descriptor);
955 
956  bool supported = true;
// Declared as 7 entries; the initializer contents were lost in extraction.
957  std::array<DataType,7> supportedTypes =
958  {
965  };
966 
// Output type must be in the supported set; on failure the reason string is
// populated via reasonIfUnsupported.
967  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
968  "Reference concatenation: output type not supported");
// Every input must be a supported type AND match the output's type.
// `&=` accumulates failures so all reasons are reported, not just the first.
969  for (const TensorInfo* input : inputs)
970  {
971  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
972  "Reference concatenation: input type not supported");
973 
974  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
975  "Reference concatenation: input and output types mismatched.");
976  }
977 
978  return supported;
979 }
980 
982  Optional<std::string&> reasonIfUnsupported) const
983 {
984  std::array<DataType,8> supportedTypes =
985  {
993  };
994 
995  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
996  "Reference constant: output is not a supported type.");
997 }
998 
1000  const TensorInfo& output,
1001  Optional<std::string&> reasonIfUnsupported) const
1002 {
1003  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1004  input.GetDataType(),
1005  &TrueFunc<>,
1006  &FalseInputFuncF32<>,
1007  &FalseFuncU8<>,
1008  &FalseFuncI32<>,
1009  &FalseFuncU8<>) &&
1010  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1011  output.GetDataType(),
1012  &FalseOutputFuncF16<>,
1013  &TrueFunc<>,
1014  &FalseFuncU8<>,
1015  &FalseFuncI32<>,
1016  &FalseFuncU8<>));
1017 }
1018 
1020  const TensorInfo& output,
1021  Optional<std::string&> reasonIfUnsupported) const
1022 {
1023  return (IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1024  input.GetDataType(),
1025  &FalseInputFuncF16<>,
1026  &TrueFunc<>,
1027  &FalseFuncU8<>,
1028  &FalseFuncI32<>,
1029  &FalseFuncU8<>) &&
1030  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1031  output.GetDataType(),
1032  &TrueFunc<>,
1033  &FalseOutputFuncF32<>,
1034  &FalseFuncU8<>,
1035  &FalseFuncI32<>,
1036  &FalseFuncU8<>));
1037 }
1038 
1040  const TensorInfo& output,
1041  const Convolution2dDescriptor& descriptor,
1042  const TensorInfo& weights,
1043  const Optional<TensorInfo>& biases,
1044  Optional<std::string&> reasonIfUnsupported) const
1045 {
1046  bool supported = true;
1047 
1048  // Define supported types.
1049  std::array<DataType,7> supportedTypes =
1050  {
1057  };
1058 
1059  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1060  "Reference Convolution2d: input is not a supported type.");
1061 
1062  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1063  "Reference Convolution2d: output is not a supported type.");
1064 
1065  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1066  "Reference Convolution2d: input and output types mismatched.");
1067 
1068 
1069  const DataType inputType = input.GetDataType();
1070  if (IsQuantized8BitType(inputType))
1071  {
1072  std::array<DataType, 3> supportedWeightTypes =
1073  {
1077  };
1078 
1079  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1080  "Reference Convolution2d: weights type not supported for quantized input.");
1081  }
1082  else
1083  {
1084  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1085  "Reference Convolution2d: weights is not a supported type.");
1086 
1087  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1088  "Reference Convolution2d: input and weights types mismatched.");
1089  }
1090 
1091  if (biases.has_value())
1092  {
1093  std::array<DataType,4> biasesSupportedTypes =
1094  {
1098  };
1099 
1100  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1101  "Reference Convolution2d: biases is not a supported type.");
1102  }
1103  IgnoreUnused(descriptor);
1104 
1105  return supported;
1106 }
1107 
1109  const TensorInfo& output,
1110  const Convolution3dDescriptor& descriptor,
1111  const TensorInfo& weights,
1112  const Optional<TensorInfo>& biases,
1113  Optional<std::string&> reasonIfUnsupported) const
1114 {
1115  bool supported = true;
1116 
1117  // Define supported types.
1118  std::array<DataType,7> supportedTypes =
1119  {
1126  };
1127 
1128  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1129  "Reference Convolution3d: input is not a supported type.");
1130 
1131  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1132  "Reference Convolution3d: output is not a supported type.");
1133 
1134  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1135  "Reference Convolution3d: input and output types mismatched.");
1136 
1137  const DataType inputType = input.GetDataType();
1138  if (IsQuantized8BitType(inputType))
1139  {
1140  std::array<DataType, 3> supportedWeightTypes =
1141  {
1145  };
1146 
1147  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1148  "Reference Convolution3d: weights type not supported for quantized input.");
1149  }
1150  else
1151  {
1152  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1153  "Reference Convolution3d: weights is not a supported type.");
1154 
1155  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1156  "Reference Convolution3d: input and weights types mismatched.");
1157  }
1158 
1159  if (biases.has_value())
1160  {
1161  std::array<DataType,4> biasesSupportedTypes =
1162  {
1166  };
1167 
1168  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1169  "Reference Convolution3d: biases is not a supported type.");
1170  }
1171  IgnoreUnused(descriptor);
1172 
1173  return supported;
1174 }
1175 
1177  const TensorInfo& output,
1178  Optional<std::string&> reasonIfUnsupported) const
1179 {
1180  bool supported = true;
1181 
1182  std::array<DataType, 8> supportedTypes =
1183  {
1192  };
1193 
1194  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1195  "Reference for Debug layer: input type not supported");
1196 
1197  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1198  "Reference for Debug layer: output type not supported");
1199 
1200  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1201  "Reference for Debug layer: input and output types are mismatched");
1202 
1203  return supported;
1204 }
1205 
1207  const TensorInfo& output,
1208  const DepthToSpaceDescriptor& descriptor,
1209  Optional<std::string&> reasonIfUnsupported) const
1210 {
1211  IgnoreUnused(descriptor);
1212  bool supported = true;
1213 
1214  std::array<DataType,6> supportedTypes =
1215  {
1221  };
1222 
1223  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1224  "Reference DepthToSpace: input type not supported");
1225 
1226  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1227  "Reference DepthToSpace: output type not supported");
1228 
1229  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1230  "Reference DepthToSpace: input and output types are mismatched");
1231 
1232  return supported;
1233 }
1234 
1236  const TensorInfo& output,
1237  const DepthwiseConvolution2dDescriptor& descriptor,
1238  const TensorInfo& weights,
1239  const Optional<TensorInfo>& biases,
1240  Optional<std::string&> reasonIfUnsupported) const
1241 {
1242  IgnoreUnused(descriptor);
1243  bool supported = true;
1244 
1245  // Define supported types.
1246  std::array<DataType,7> supportedTypes =
1247  {
1254  };
1255 
1256  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1257  "Reference DepthwiseConvolution2d: input is not a supported type.");
1258 
1259  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1260  "Reference DepthwiseConvolution2d: output is not a supported type.");
1261 
1262  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1263  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1264 
1265  const DataType inputType = input.GetDataType();
1266  if (IsQuantized8BitType(inputType))
1267  {
1268  std::array<DataType, 3> supportedWeightTypes =
1269  {
1273  };
1274 
1275  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1276  "Reference DepthwiseConvolution2d: weights type not supported for "
1277  "quantized input.");
1278  }
1279  else
1280  {
1281  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1282  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1283 
1284  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1285  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1286  }
1287 
1288  if (biases.has_value())
1289  {
1290  std::array<DataType,4> biasesSupportedTypes =
1291  {
1295  };
1296  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1297  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1298  }
1299 
1300  return supported;
1301 
1302 }
1303 
1305  const TensorInfo& output,
1306  Optional<std::string&> reasonIfUnsupported) const
1307 {
1308  bool supported = true;
1309 
1310  std::array<DataType,5> supportedInputTypes = {
1316  };
1317 
1318  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1319  "Reference for Dequantize layer: input type not supported.");
1320 
1321  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1322  "Reference for Dequantize layer: per-axis quantized input not supported.");
1323 
1324  std::array<DataType,3> supportedOutputTypes = {
1327  };
1328 
1329  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1330  "Reference for Dequantize layer: output type not supported.");
1331 
1332  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1333  "Reference for Dequantize layer: input/output shapes have different num total "
1334  "elements.");
1335 
1336  return supported;
1337 }
1338 
1340  const TensorInfo& scores,
1341  const TensorInfo& anchors,
1342  const TensorInfo& detectionBoxes,
1343  const TensorInfo& detectionClasses,
1344  const TensorInfo& detectionScores,
1345  const TensorInfo& numDetections,
1346  const DetectionPostProcessDescriptor& descriptor,
1347  Optional<std::string&> reasonIfUnsupported) const
1348 {
1349  IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
1350 
1351  bool supported = true;
1352 
1353  std::array<DataType,6> supportedInputTypes =
1354  {
1360  };
1361 
1362  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1363  "Reference DetectionPostProcess: input 0 is not a supported type.");
1364 
1365  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1366  "Reference DetectionPostProcess: input 1 is not a supported type.");
1367 
1368  return supported;
1369 }
1370 
1372  const TensorInfo& output,
1373  const DepthwiseConvolution2dDescriptor& descriptor,
1374  const TensorInfo& weights,
1375  const Optional<TensorInfo>& biases,
1376  Optional<std::string&> reasonIfUnsupported) const
1377 {
1378  return IsDepthwiseConvolutionSupported(input, output, descriptor, weights, biases, reasonIfUnsupported);
1379 }
1380 
1382  const TensorInfo& input1,
1383  const TensorInfo& output,
1384  Optional<std::string&> reasonIfUnsupported) const
1385 {
1386  bool supported = true;
1387 
1388  std::array<DataType,7> supportedTypes = {
1395  };
1396 
1397  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1398  "Reference division: input 0 is not a supported type.");
1399 
1400  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1401  "Reference division: input 1 is not a supported type.");
1402 
1403  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1404  "Reference division: output is not a supported type.");
1405 
1406  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1407  "Reference division: input 0 and Input 1 types are mismatched");
1408 
1409  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1410  "Reference division: input and output types are mismatched");
1411 
1412  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1413  "Reference division: shapes are not suitable for implicit broadcast.");
1414 
1415  return supported;
1416 }
1417 
1419  const TensorInfo& output,
1420  const ElementwiseUnaryDescriptor& descriptor,
1421  Optional<std::string&> reasonIfUnsupported) const
1422 {
1423  IgnoreUnused(descriptor);
1424 
1425  std::array<DataType, 7> supportedTypes =
1426  {
1433  };
1434 
1435  std::array<DataType, 1> logicalSupportedTypes =
1436  {
1438  };
1439 
1440  bool supported = true;
1441 
1442  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1443  {
1444  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1445  "Reference elementwise unary: input type not supported");
1446 
1447  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1448  "Reference elementwise unary: output type not supported");
1449  }
1450  else
1451  {
1452  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1453  "Reference elementwise unary: input type not supported");
1454 
1455  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1456  "Reference elementwise unary: output type not supported");
1457  }
1458 
1459  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1460  "Reference elementwise unary: input and output types not matching");
1461 
1462  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1463  "Reference elementwise unary: input and output shapes"
1464  "have different number of total elements");
1465 
1466  return supported;
1467 }
1468 
1470  const FakeQuantizationDescriptor& descriptor,
1471  Optional<std::string&> reasonIfUnsupported) const
1472 {
1473  IgnoreUnused(descriptor);
1474  bool supported = true;
1475 
1476  std::array<DataType,1> supportedTypes =
1477  {
1479  };
1480 
1481  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1482  "Reference fake quantization: input type not supported.");
1483 
1484  return supported;
1485 }
1486 
1488  const TensorInfo& output,
1489  const FillDescriptor& descriptor,
1490  Optional<std::string&> reasonIfUnsupported) const
1491 {
1492  IgnoreUnused(descriptor);
1493  IgnoreUnused(output);
1494 
1495  bool supported = true;
1496 
1497  std::array<DataType,3> supportedTypes =
1498  {
1502  };
1503 
1504  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1505  "Reference Fill: input type not supported.");
1506 
1507  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1508  "Reference Fill: output type not supported.");
1509  return supported;
1510 }
1511 
1513  const TensorInfo& output,
1514  Optional<std::string&> reasonIfUnsupported) const
1515 {
1516  IgnoreUnused(output);
1517  bool supported = true;
1518 
1519  std::array<DataType,3> supportedTypes =
1520  {
1523  };
1524 
1525  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1526  "Reference Floor: input type not supported.");
1527 
1528  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1529  "Reference Floor: output type not supported.");
1530 
1531  return supported;
1532 }
1533 
1535  const TensorInfo& output,
1536  const TensorInfo& weights,
1537  const TensorInfo& biases,
1538  const FullyConnectedDescriptor& descriptor,
1539  Optional<std::string&> reasonIfUnsupported) const
1540 {
1541  bool supported = true;
1542 
1543  // Define supported types.
1544  std::array<DataType,6> supportedTypes =
1545  {
1552  };
1553 
1554  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1555  "Reference Fully Connected: input type not supported.");
1556 
1557  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1558  "Reference Fully Connected: output type not supported.");
1559 
1560  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1561  "Reference Fully Connected: weights type not supported.");
1562 
1563  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1564  "Reference Fully Connected: input and output types mismatched.");
1565 
1566  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1567  "Reference Fully Connected: weights is not a supported type.");
1568 
1569  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1570  "Reference Fully Connected: input and weights types mismatched.");
1571 
1572  if (descriptor.m_BiasEnabled)
1573  {
1574  // Defined supported types for bias
1575  std::array<DataType, 5>
1576  supportedBiasTypes =
1577  {
1582  };
1583 
1584  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1585  "Reference Fully Connected: bias type not supported.");
1586 
1587  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1588  "Reference Fully Connected: bias and weight types mismatch.");
1589 
1590  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1591  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1592 
1593  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1594  "Reference Fully Connected: bias must have 1 dimension.");
1595 
1596  }
1597 
1598  return supported;
1599 }
1600 
1602  const armnn::TensorInfo& input1,
1603  const armnn::TensorInfo& output,
1604  armnn::Optional<std::string&> reasonIfUnsupported) const
1605 {
1606  bool supported = true;
1607  std::array<DataType,7> supportedTypes =
1608  {
1615  };
1616 
1617  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1618  "Reference GatherNd: input type not supported");
1619 
1620  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1621  "Reference GatherNd: output type not supported");
1622 
1623  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1624  "Reference GatherNd: indices (input1) type not supported");
1625 
1626  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1627  "Reference GatherNd: input and output types not matching");
1628 
1629  return supported;
1630 }
1631 
1633  const armnn::TensorInfo& input1,
1634  const armnn::TensorInfo& output,
1635  const GatherDescriptor& descriptor,
1636  armnn::Optional<std::string&> reasonIfUnsupported) const
1637 {
1638  bool supported = true;
1639  std::array<DataType,7> supportedTypes =
1640  {
1647  };
1648 
1649  IgnoreUnused(descriptor);
1650  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1651  "Reference Gather: input type not supported");
1652 
1653  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1654  "Reference Gather: output type not supported");
1655 
1656  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1657  "Reference Gather: indices (input1) type not supported");
1658 
1659  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1660  "Reference Gather: input and output types not matching");
1661 
1662  return supported;
1663 }
1664 
1666  Optional<std::string&> /*reasonIfUnsupported*/) const
1667 {
1668  return true;
1669 }
1670 
1672  const TensorInfo& output,
1673  const InstanceNormalizationDescriptor& descriptor,
1674  Optional<std::string&> reasonIfUnsupported) const
1675 {
1676  IgnoreUnused(descriptor);
1677  // Define supported types
1678  std::array<DataType, 3> supportedTypes =
1679  {
1682  };
1683 
1684  bool supported = true;
1685 
1686  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1687  "Reference Instance Normalization: input type not supported.");
1688 
1689  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1690  "Reference Instance Normalization: output type not supported.");
1691 
1692  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1693  "Reference Instance Normalization: input and output types mismatched.");
1694 
1695  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1696  "Reference Instance Normalization: input and output shapes have different "
1697  "num total elements.");
1698 
1699  return supported;
1700 }
1701 
1703  const TensorInfo& output,
1704  const L2NormalizationDescriptor& descriptor,
1705  Optional<std::string&> reasonIfUnsupported) const
1706 {
1707  IgnoreUnused(descriptor);
1708  // Define supported types
1709  std::array<DataType, 6> supportedTypes =
1710  {
1716  };
1717 
1718  bool supported = true;
1719 
1720  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1721  "Reference L2normalization: input type not supported.");
1722 
1723  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1724  "Reference L2normalization: output type not supported.");
1725 
1726  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1727  "Reference L2normalization: input and output types mismatched.");
1728 
1729  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1730  "Reference L2normalization: input and output shapes have different "
1731  "num total elements.");
1732 
1733  return supported;
1734 }
1735 
1737  const TensorInfo& input1,
1738  const TensorInfo& output,
1739  const LogicalBinaryDescriptor& descriptor,
1740  Optional<std::string&> reasonIfUnsupported) const
1741 {
1742  IgnoreUnused(descriptor);
1743 
1744  std::array<DataType, 1> supportedTypes =
1745  {
1747  };
1748 
1749  bool supported = true;
1750  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1751  "Reference LogicalBinary: input 0 type not supported");
1752  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1753  "Reference LogicalBinary: input 1 type not supported");
1754 
1755  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1756  "Reference LogicalBinary: input and output types do not match");
1757 
1758  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1759  "Reference LogicalBinary: shapes are not suitable for implicit broadcast.");
1760 
1761  return supported;
1762 }
1763 
1765  const TensorInfo& output,
1766  const LogSoftmaxDescriptor& descriptor,
1767  Optional<std::string&> reasonIfUnsupported) const
1768 {
1769  IgnoreUnused(descriptor);
1770 
1771  std::array<DataType, 3> supportedTypes =
1772  {
1775  };
1776 
1777  bool supported = true;
1778  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1779  "Reference LogSoftmax: input type not supported");
1780 
1781  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1782  "Reference LogSoftmax: output type not supported");
1783 
1784  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1785  "Reference LogSoftmax: input and output types do not match");
1786 
1787  return supported;
1788 }
1789 
1791  const TensorInfo& outputStateIn,
1792  const TensorInfo& cellStateIn,
1793  const TensorInfo& scratchBuffer,
1794  const TensorInfo& outputStateOut,
1795  const TensorInfo& cellStateOut,
1796  const TensorInfo& output,
1797  const LstmDescriptor& descriptor,
1798  const LstmInputParamsInfo& paramsInfo,
1799  Optional<std::string&> reasonIfUnsupported) const
1800 {
1801  IgnoreUnused(descriptor);
1802  IgnoreUnused(paramsInfo);
1803 
1804  bool supported = true;
1805 
1806  std::array<DataType,3> supportedTypes = {
1809  };
1810 
1811  // check inputs and outputs
1812  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1813  "Reference Lstm: input is not a supported type.");
1814  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1815  "Reference Lstm: input and outputStateIn types are mismatched");
1816  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1817  "Reference Lstm: input and cellStateIn types are mismatched");
1818  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1819  "Reference Lstm: input and scratchBuffer types are mismatched");
1820  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1821  "Reference Lstm: input and outputStateOut types are mismatched");
1822  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1823  "Reference Lstm: input and cellStateOut types are mismatched");
1824 
1825  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1826  "Reference Lstm: input and output types are mismatched");
1827  // check layer parameters
1828  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1829  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1830  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1831  "Reference Lstm: input and InputToCellWeights types are mismatched");
1832  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1833  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1834  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1835  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1836  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1837  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1838  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1839  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1840  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1841  "Reference Lstm: input and ForgetGateBias types are mismatched");
1842  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1843  "Reference Lstm: input and CellBias types are mismatched");
1844  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1845  "Reference Lstm: input and OutputGateBias types are mismatched");
1846  if (!descriptor.m_CifgEnabled)
1847  {
1848  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1849  "Reference Lstm: input and InputToInputWeights types are mismatched");
1850  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1851  reasonIfUnsupported,
1852  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1853  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1854  "Reference Lstm: input and InputGateBias types are mismatched");
1855  if (descriptor.m_PeepholeEnabled)
1856  {
1857  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1858  reasonIfUnsupported,
1859  "Reference Lstm: input and CellToInputWeights types are mismatched");
1860  }
1861  }
1862  if (descriptor.m_PeepholeEnabled)
1863  {
1864  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1865  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1866  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1867  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1868  }
1869  if (descriptor.m_ProjectionEnabled)
1870  {
1871  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1872  "Reference Lstm: input and mProjectionWeights types are mismatched");
1873  if (paramsInfo.m_ProjectionBias != nullptr)
1874  {
1875  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1876  "Reference Lstm: input and ProjectionBias types are mismatched");
1877  }
1878  }
1879  if (descriptor.m_LayerNormEnabled)
1880  {
1881  if (!descriptor.m_CifgEnabled)
1882  {
1883  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1884  reasonIfUnsupported,
1885  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1886  }
1887  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1888  reasonIfUnsupported,
1889  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1890  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1891  reasonIfUnsupported,
1892  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1893  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1894  reasonIfUnsupported,
1895  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1896  }
1897 
1898  return supported;
1899 }
1900 
1902  const TensorInfo& input1,
1903  const TensorInfo& output,
1904  Optional<std::string&> reasonIfUnsupported) const
1905 {
1906  bool supported = true;
1907 
1908  std::array<DataType,7> supportedTypes = {
1915  };
1916 
1917  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1918  "Reference maximum: input 0 is not a supported type.");
1919 
1920  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1921  "Reference maximum: input 1 is not a supported type.");
1922 
1923  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1924  "Reference maximum: output is not a supported type.");
1925 
1926  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1927  "Reference maximum: input 0 and Input 1 types are mismatched");
1928 
1929  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1930  "Reference maximum: input and output types are mismatched");
1931 
1932  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1933  "Reference maximum: shapes are not suitable for implicit broadcast.");
1934 
1935  return supported;
1936 }
1937 
1939  const TensorInfo& output,
1940  const MeanDescriptor& descriptor,
1941  Optional<std::string&> reasonIfUnsupported) const
1942 {
1943  bool supported = true;
1944  std::string meanLayerStr = "Mean";
1945  std::string outputTensorStr = "output";
1946 
1947  std::array<DataType,6> supportedTypes =
1948  {
1955  };
1956 
1957  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1958  "Reference Mean: input type not supported.");
1959 
1960  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1961  "Reference Mean: input and output types are mismatched");
1962 
1963  if (descriptor.m_KeepDims)
1964  {
1965  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1966  reasonIfUnsupported,
1967  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1968  output.GetNumDimensions(),
1969  meanLayerStr, outputTensorStr).data());
1970  }
1971  else if (descriptor.m_Axis.empty())
1972  {
1973  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1974  reasonIfUnsupported,
1975  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1976  meanLayerStr, outputTensorStr).data());
1977  }
1978  else
1979  {
1980  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1981 
1982  if (outputDim > 0)
1983  {
1984  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1985  reasonIfUnsupported,
1986  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1987  meanLayerStr, outputTensorStr).data());
1988  }
1989  else
1990  {
1991  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1992  reasonIfUnsupported,
1993  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1994  meanLayerStr, outputTensorStr).data());
1995  }
1996  }
1997 
1998  return supported;
1999 }
2000 
2002  const TensorInfo &output,
2003  Optional<std::string &> reasonIfUnsupported) const
2004 {
2005  bool supported = true;
2006 
2007  std::array<DataType,7> supportedTypes =
2008  {
2016  };
2017 
2018  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2019  "Reference MemCopy: input type not supported");
2020 
2021  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2022  "Reference MemCopy: output type not supported");
2023 
2024  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2025  "Reference MemCopy: input and output types are mismatched");
2026 
2027  return supported;
2028 }
2029 
2031  const TensorInfo& input1,
2032  const TensorInfo& output,
2033  Optional<std::string&> reasonIfUnsupported) const
2034 {
2035  bool supported = true;
2036 
2037  std::array<DataType,7> supportedTypes = {
2044  };
2045 
2046  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2047  "Reference minimum: input 0 is not a supported type.");
2048 
2049  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2050  "Reference minimum: input 1 is not a supported type.");
2051 
2052  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2053  "Reference minimum: output is not a supported type.");
2054 
2055  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2056  "Reference minimum: input 0 and Input 1 types are mismatched");
2057 
2058  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2059  "Reference minimum: input and output types are mismatched");
2060 
2061  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2062  "Reference minimum: shapes are not suitable for implicit broadcast.");
2063 
2064  return supported;
2065 }
2066 
2068  const TensorInfo& input1,
2069  const TensorInfo& output,
2070  Optional<std::string&> reasonIfUnsupported) const
2071 {
2072  bool supported = true;
2073 
2074  std::array<DataType,7> supportedTypes = {
2081  };
2082 
2083  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2084  "Reference multiplication: input 0 is not a supported type.");
2085 
2086  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2087  "Reference multiplication: input 1 is not a supported type.");
2088 
2089  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2090  "Reference multiplication: output is not a supported type.");
2091 
2092  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2093  "Reference multiplication: input 0 and Input 1 types are mismatched");
2094 
2095  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2096  "Reference multiplication: input and output types are mismatched");
2097 
2098  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2099  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2100 
2101  return supported;
2102 }
2103 
2105  const TensorInfo& output,
2106  const NormalizationDescriptor& descriptor,
2107  Optional<std::string&> reasonIfUnsupported) const
2108 {
2109  IgnoreUnused(descriptor);
2110 
2111  // Define supported types
2112  std::array<DataType, 6> supportedTypes =
2113  {
2119  };
2120 
2121  bool supported = true;
2122 
2123  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2124  "Reference normalization: input type not supported.");
2125 
2126  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2127  "Reference normalization: output type not supported.");
2128 
2129  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2130  "Reference normalization: input and output shapes have different "
2131  "num total elements.");
2132 
2133  return supported;
2134 }
2135 
2137  Optional<std::string&> /*reasonIfUnsupported*/) const
2138 {
2139  return true;
2140 }
2141 
2143  const TensorInfo& output,
2144  const PadDescriptor& descriptor,
2145  Optional<std::string&> reasonIfUnsupported) const
2146 {
2147  IgnoreUnused(descriptor);
2148  bool supported = true;
2149 
2150  // Define supported output and inputs types.
2151  std::array<DataType,6> supportedTypes =
2152  {
2158  };
2159 
2160  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2161  "Reference pad: input is not a supported type.");
2162 
2163  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2164  "Reference pad: output is not a supported type.");
2165 
2166  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2167  "Reference pad: input and output types are mismatched.");
2168 
2169  return supported;
2170 }
2171 
2173  const TensorInfo& output,
2174  const PermuteDescriptor& descriptor,
2175  Optional<std::string&> reasonIfUnsupported) const
2176 {
2177  IgnoreUnused(descriptor);
2178  bool supported = true;
2179 
2180  // Define supported output and inputs types.
2181  std::array<DataType, 6> supportedTypes =
2182  {
2189  };
2190 
2191  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2192  "Reference permute: input is not a supported type.");
2193 
2194  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2195  "Reference permute: output is not a supported type.");
2196 
2197  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2198  "Reference permute: input and output types are mismatched.");
2199 
2200  return supported;
2201 }
2202 
2204  const TensorInfo& output,
2205  const Pooling2dDescriptor& descriptor,
2206  Optional<std::string&> reasonIfUnsupported) const
2207 {
2208  IgnoreUnused(descriptor);
2209  bool supported = true;
2210 
2211  // Define supported output and inputs types.
2212  std::array<DataType,6> supportedTypes =
2213  {
2219  };
2220 
2221  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2222  "Reference poolind2d: input is not a supported type.");
2223 
2224  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2225  "Reference poolind2d: output is not a supported type.");
2226 
2227  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2228  "Reference poolind2d: input and output types are mismatched.");
2229 
2230  return supported;
2231 }
2232 
2234  const TensorInfo& output,
2235  const Pooling3dDescriptor& descriptor,
2236  Optional<std::string&> reasonIfUnsupported) const
2237 {
2238  IgnoreUnused(descriptor);
2239  bool supported = true;
2240 
2241  // Define supported output and inputs types.
2242  std::array<DataType,6> supportedTypes =
2243  {
2249  };
2250 
2251  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2252  "Reference poolind3d: input is not a supported type.");
2253 
2254  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2255  "Reference poolind3d: output is not a supported type.");
2256 
2257  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2258  "Reference poolind3d: input and output types are mismatched.");
2259 
2260  return supported;
2261 }
2262 
2263 
2265  const TensorInfo& previousOutputIn,
2266  const TensorInfo& previousCellStateIn,
2267  const TensorInfo& outputStateOut,
2268  const TensorInfo& cellStateOut,
2269  const TensorInfo& output,
2270  const QLstmDescriptor& descriptor,
2271  const LstmInputParamsInfo& paramsInfo,
2272  Optional<std::string&> reasonIfUnsupported) const
2273 {
2274  IgnoreUnused(input);
2275  IgnoreUnused(previousOutputIn);
2276  IgnoreUnused(previousCellStateIn);
2277  IgnoreUnused(outputStateOut);
2278  IgnoreUnused(cellStateOut);
2279  IgnoreUnused(output);
2280  IgnoreUnused(descriptor);
2281  IgnoreUnused(paramsInfo);
2282 
2283  IgnoreUnused(reasonIfUnsupported);
2284 
2285  return true;
2286 }
2287 
2289  const TensorInfo& output,
2290  Optional<std::string&> reasonIfUnsupported) const
2291 {
2292  bool supported = true;
2293 
2294  // Define supported input types.
2295  std::array<DataType,7> supportedInputTypes = {
2302  };
2303 
2304  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2305  "Reference quantize: input type not supported.");
2306 
2307  // Define supported output types.
2308  std::array<DataType,4> supportedOutputTypes = {
2313  };
2314  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2315  "Reference quantize: output type not supported.");
2316 
2317  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2318  "Reference quantize: input and output shapes have different num total elements.");
2319 
2320  return supported;
2321 }
2322 
2324  const TensorInfo& output,
2325  Optional<std::string&> reasonIfUnsupported) const
2326 {
2327  IgnoreUnused(input);
2328  // Define supported output types.
2329  std::array<DataType,1> supportedOutputTypes =
2330  {
2332  };
2333 
2334  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2335  "Reference rank: input type not supported.");
2336 }
2337 
2339  const TensorInfo& output,
2340  const ReduceDescriptor& descriptor,
2341  Optional<std::string&> reasonIfUnsupported) const
2342 {
2343  IgnoreUnused(descriptor);
2344  bool supported = true;
2345  std::array<DataType,7> supportedTypes =
2346  {
2353  };
2354 
2355  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2356  "Reference Reduce: input type not supported");
2357 
2358  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2359  "Reference Reduce: output type not supported");
2360 
2361  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2362  "Reference Reduce: input and output types not matching");
2363 
2364  return supported;
2365 }
2366 
2368  const TensorInfo& output,
2369  const ReshapeDescriptor& descriptor,
2370  Optional<std::string&> reasonIfUnsupported) const
2371 {
2372  IgnoreUnused(output);
2373  IgnoreUnused(descriptor);
2374  // Define supported output types.
2375  std::array<DataType,8> supportedOutputTypes =
2376  {
2385  };
2386 
2387  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2388  "Reference reshape: input type not supported.");
2389 }
2390 
2392  const TensorInfo& output,
2393  const ResizeDescriptor& descriptor,
2394  Optional<std::string&> reasonIfUnsupported) const
2395 {
2396  IgnoreUnused(descriptor);
2397  bool supported = true;
2398  std::array<DataType,7> supportedTypes =
2399  {
2407  };
2408 
2409  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2410  "Reference Resize: input type not supported");
2411 
2412  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2413  "Reference Resize: output type not supported");
2414 
2415  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2416  "Reference Resize: input and output types not matching");
2417 
2418  return supported;
2419 }
2420 
2422  const TensorInfo& input1,
2423  const TensorInfo& output,
2424  Optional<std::string&> reasonIfUnsupported) const
2425 {
2426  bool supported = true;
2427  // ReverseV2 is data type agnostic so it can support all the types in the Reference backend
2428  std::array<DataType,8> supportedTypes =
2429  {
2438  };
2439 
2440  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2441  "Reference ReverseV2: input0 type not supported");
2442 
2443  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2444  "Reference ReverseV2: output type not supported");
2445 
2446  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2447  "Reference ReverseV2: input0 and output types not matching");
2448 
2449  std::array<DataType,6> input2SupportedTypes =
2450  {
2452  };
2453 
2454  supported &= CheckSupportRule(TypeAnyOf(input1, input2SupportedTypes), reasonIfUnsupported,
2455  "Reference ReverseV2: input1 type not supported");
2456 
2457  return supported;
2458 }
2459 
2461  const TensorInfo& indices,
2462  const TensorInfo& updates,
2463  const TensorInfo& output,
2464  const ScatterNdDescriptor& descriptor,
2465  Optional<std::string&> reasonIfUnsupported) const
2466 {
2467  IgnoreUnused(descriptor);
2468 
2469  bool supported = true;
2470 
2471  std::array<DataType, 7> supportedTypes
2472  {
2480  };
2481 
2482  std::array<DataType, 1> indicesSupportedTypes =
2483  {
2485  };
2486 
2487  supported &= CheckSupportRule(TypeAnyOf(indices, indicesSupportedTypes), reasonIfUnsupported,
2488  "ScatterNd: indices type not supported.");
2489 
2490  supported &= CheckSupportRule(TypeAnyOf(updates, supportedTypes), reasonIfUnsupported,
2491  "ScatterNd: updates type not supported.");
2492 
2493  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2494  "ScatterNd: output type not supported");
2495 
2496  supported &= CheckSupportRule(TypesAreEqual(updates, output), reasonIfUnsupported,
2497  "ScatterNd: input and updates types are mismatched");
2498 
2499  if (descriptor.m_InputEnabled)
2500  {
2501  // If the input slot is enabled, we have the input tensor in this slot
2502  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2503  "ScatterNd: input type not supported.");
2504 
2505  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2506  "ScatterNd: input and output types are mismatched");
2507  }
2508  else
2509  {
2510  // If the input slot is not enabled, we have the shape tensor in this slot
2511  supported &= CheckSupportRule(TypeAnyOf(input, indicesSupportedTypes), reasonIfUnsupported,
2512  "ScatterNd: shape type not supported.");
2513  }
2514 
2515  return supported;
2516 }
2517 
2519  const TensorInfo& output,
2520  Optional<std::string&> reasonIfUnsupported) const
2521 {
2522  IgnoreUnused(input);
2523  bool supported = true;
2524 
2525  std::array<DataType, 1> supportedTypes =
2526  {
2528  };
2529 
2530  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2531  "Reference Shape: output type not supported");
2532 
2533  return supported;
2534 }
2535 
2537  const TensorInfo& output,
2538  const SliceDescriptor& descriptor,
2539  Optional<std::string&> reasonIfUnsupported) const
2540 {
2541  IgnoreUnused(descriptor);
2542  bool supported = true;
2543 
2544  std::array<DataType, 5> supportedTypes =
2545  {
2551  };
2552 
2553  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2554  "Reference Slice: input type not supported");
2555 
2556  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2557  "Reference Slice: output type not supported");
2558 
2559  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2560  "Reference Slice: input and output types are mismatched");
2561 
2562  return supported;
2563 }
2564 
2566  const TensorInfo& output,
2567  const SoftmaxDescriptor& descriptor,
2568  Optional<std::string&> reasonIfUnsupported) const
2569 {
2570  IgnoreUnused(descriptor);
2571  bool supported = true;
2572  std::array<DataType,7> supportedTypes =
2573  {
2580  };
2581 
2582  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2583  "Reference Softmax: output type not supported");
2584 
2585  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2586  "Reference Softmax: input type not supported");
2587 
2588  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2589  "Reference Softmax: input type not supported");
2590 
2591  return supported;
2592 }
2593 
2595  const TensorInfo& output,
2596  const SpaceToBatchNdDescriptor& descriptor,
2597  Optional<std::string&> reasonIfUnsupported) const
2598 {
2599  IgnoreUnused(descriptor);
2600  bool supported = true;
2601  std::array<DataType,6> supportedTypes =
2602  {
2608  };
2609 
2610  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2611  "Reference SpaceToBatchNd: input type not supported");
2612 
2613  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2614  "Reference SpaceToBatchNd: output type not supported");
2615 
2616  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2617  "Reference SpaceToBatchNd: input and output types are mismatched");
2618 
2619  return supported;
2620 }
2621 
2623  const TensorInfo& output,
2624  const SpaceToDepthDescriptor& descriptor,
2625  Optional<std::string&> reasonIfUnsupported) const
2626 {
2627 
2628  IgnoreUnused(descriptor);
2629  bool supported = true;
2630 
2631  std::array<DataType,6> supportedTypes =
2632  {
2638  };
2639 
2640  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2641  "Reference SpaceToDepth: input type not supported");
2642 
2643  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2644  "Reference SpaceToDepth: output type not supported");
2645 
2646  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2647  "Reference SpaceToDepth: input and output types are mismatched");
2648 
2649  return supported;
2650 }
2651 
2653  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
2654  const ViewsDescriptor& descriptor,
2655  Optional<std::string&> reasonIfUnsupported) const
2656 {
2657  IgnoreUnused(descriptor);
2658  bool supported = true;
2659  std::array<DataType,6> supportedTypes =
2660  {
2666  };
2667 
2668  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2669  "Reference splitter: output type not supported");
2670  for (const TensorInfo& output : outputs)
2671  {
2672  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2673  "Reference splitter: input type not supported");
2674 
2675  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2676  "Reference splitter: input and output types mismatched.");
2677  }
2678 
2679  return supported;
2680 }
2681 
2682 bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
2683  const TensorInfo& output,
2684  const StackDescriptor& descriptor,
2685  Optional<std::string&> reasonIfUnsupported) const
2686 {
2687  IgnoreUnused(descriptor);
2688 
2689  bool supported = true;
2690  std::array<DataType,7> supportedTypes =
2691  {
2698  };
2699 
2700  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2701  "Reference stack: output type not supported");
2702  for (const TensorInfo* input : inputs)
2703  {
2704  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2705  "Reference stack: input type not supported");
2706 
2707  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2708  "Reference stack: input and output types mismatched.");
2709  }
2710 
2711  return supported;
2712 }
2713 
2715  const TensorInfo& output,
2716  const StridedSliceDescriptor& descriptor,
2717  Optional<std::string&> reasonIfUnsupported) const
2718 {
2719  IgnoreUnused(descriptor);
2720  bool supported = true;
2721 
2722  std::array<DataType,5> supportedTypes =
2723  {
2728  };
2729 
2730  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2731  "Reference StridedSlice: input type not supported");
2732 
2733  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2734  "Reference StridedSlice: output type not supported");
2735 
2736  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2737  "Reference StridedSlice: input and output types are mismatched");
2738 
2739  return supported;
2740 }
2741 
2743  const TensorInfo& input1,
2744  const TensorInfo& output,
2745  Optional<std::string&> reasonIfUnsupported) const
2746 {
2747  bool supported = true;
2748 
2749  std::array<DataType,7> supportedTypes = {
2756  };
2757 
2758  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2759  "Reference subtraction: input 0 is not a supported type.");
2760 
2761  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2762  "Reference subtraction: input 1 is not a supported type.");
2763 
2764  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2765  "Reference subtraction: output is not a supported type.");
2766 
2767  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2768  "Reference subtraction: input 0 and Input 1 types are mismatched");
2769 
2770  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2771  "Reference subtraction: input and output types are mismatched");
2772 
2773  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2774  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2775 
2776  return supported;
2777 }
2778 
2780  const TensorInfo& alpha,
2781  const TensorInfo& output,
2782  Optional<std::string&> reasonIfUnsupported) const
2783 {
2784  bool supported = true;
2785 
2786  std::array<DataType, 6> supportedTypes
2787  {
2793  };
2794 
2795  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2796  "PReLU: input is not a supported type.");
2797 
2798  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2799  "PReLU: alpha is not a supported type.");
2800 
2801  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2802  "PReLU: output is not a supported type.");
2803 
2804  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2805  "PReLU: input, alpha and output types are mismatched");
2806 
2807  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2808  "PReLU: shapes are not suitable for implicit broadcast");
2809 
2810  return supported;
2811 }
2812 
2814  const TensorInfo& output,
2815  const TileDescriptor& descriptor,
2816  Optional<std::string&> reasonIfUnsupported) const
2817 {
2818  IgnoreUnused(descriptor);
2819 
2820  bool supported = true;
2821 
2822  std::array<DataType, 7> supportedTypes
2823  {
2831  };
2832 
2833  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2834  "Tile: input type not supported.");
2835 
2836  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2837  "Tile: output type not supported");
2838 
2839  return supported;
2840 }
2841 
2843  const TensorInfo& output,
2844  const TransposeConvolution2dDescriptor& descriptor,
2845  const TensorInfo& weights,
2846  const Optional<TensorInfo>& biases,
2847  Optional<std::string&> reasonIfUnsupported) const
2848 {
2849  IgnoreUnused(descriptor);
2850  bool supported = true;
2851 
2852  std::array<DataType,7> supportedTypes =
2853  {
2860  };
2861 
2862  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2863  "Reference TransposeConvolution2d: input is not a supported type.");
2864 
2865  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2866  "Reference TransposeConvolution2d: output is not a supported type.");
2867 
2868  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2869  "Reference TransposeConvolution2d: input and output types mismatched.");
2870 
2871 
2872  const DataType inputType = input.GetDataType();
2873  if (IsQuantized8BitType(inputType))
2874  {
2875  std::array<DataType, 3> supportedWeightTypes =
2876  {
2880  };
2881 
2882  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2883  "Reference TransposeConvolution2d: weights type not supported for "
2884  "quantized input.");
2885  }
2886  else
2887  {
2888  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2889  "Reference TransposeConvolution2d: weights is not a supported type.");
2890 
2891  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2892  "Reference TransposeConvolution2d: input and weights types mismatched.");
2893  }
2894 
2895  if (biases.has_value())
2896  {
2897  std::array<DataType,4> biasesSupportedTypes =
2898  {
2902  };
2903  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2904  "Reference TransposeConvolution2d: biases is not a supported type.");
2905  }
2906 
2907  return supported;
2908 }
2909 
2911  const TensorInfo& output,
2912  const TransposeDescriptor& descriptor,
2913  Optional<std::string&> reasonIfUnsupported) const
2914 {
2915  IgnoreUnused(descriptor);
2916  bool supported = true;
2917 
2918  // Define supported output and inputs types.
2919  std::array<DataType, 6> supportedTypes =
2920  {
2927  };
2928 
2929  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2930  "Reference transpose: input is not a supported type.");
2931 
2932  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2933  "Reference transpose: output is not a supported type.");
2934 
2935  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2936  "Reference transpose: input and output types are mismatched.");
2937 
2938  return supported;
2939 }
2940 
2942  const TensorInfo& input,
2943  const TensorInfo& outputStateIn,
2944  const TensorInfo& cellStateIn,
2945  const TensorInfo& outputStateOut,
2946  const TensorInfo& cellStateOut,
2947  const TensorInfo& output,
2948  const UnidirectionalSequenceLstmDescriptor& descriptor,
2949  const LstmInputParamsInfo& paramsInfo,
2950  Optional<std::string&> reasonIfUnsupported) const
2951 {
2952  IgnoreUnused(descriptor);
2953  IgnoreUnused(paramsInfo);
2954  IgnoreUnused(outputStateIn);
2955  IgnoreUnused(cellStateIn);
2956  IgnoreUnused(outputStateOut);
2957  IgnoreUnused(cellStateOut);
2958  bool supported = true;
2959 
2960  std::array<DataType, 2> supportedTypes =
2961  {
2964  };
2965 
2966  std::array<DataType, 2> supportedWeightTypes =
2967  {
2970  };
2971 
2972  std::array<DataType, 3> supportedBiasTypes =
2973  {
2977  };
2978 
2979  // check inputs and outputs
2980  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2981  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2982  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2983  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2984 
2985  // check layer parameters
2986  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2987  reasonIfUnsupported,
2988  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2989  "is not a supported type.");
2990  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2991  reasonIfUnsupported,
2992  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2993  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2994  reasonIfUnsupported,
2995  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2996  "is not a supported type.");
2997  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2998  reasonIfUnsupported,
2999  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
3000  "is not a supported type.");
3001  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
3002  reasonIfUnsupported,
3003  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
3004  "is not a supported type.");
3005  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
3006  reasonIfUnsupported,
3007  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
3008  "is not a supported type.");
3009 
3010  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
3011  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
3012  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
3013  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
3014  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
3015  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
3016  if (!descriptor.m_CifgEnabled)
3017  {
3018  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
3019  reasonIfUnsupported,
3020  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
3021  "is not a supported type.");
3022  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
3023  reasonIfUnsupported,
3024  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
3025  "is not a supported type.");
3026  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
3027  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
3028  if (descriptor.m_PeepholeEnabled)
3029  {
3030  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
3031  reasonIfUnsupported,
3032  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
3033  "is not a supported type.");
3034  }
3035  }
3036  if (descriptor.m_PeepholeEnabled)
3037  {
3038  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
3039  reasonIfUnsupported,
3040  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
3041  "is not a supported type.");
3042  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
3043  reasonIfUnsupported,
3044  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
3045  "is not a supported type.");
3046  }
3047  if (descriptor.m_ProjectionEnabled)
3048  {
3049  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
3050  reasonIfUnsupported,
3051  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
3052  "is not a supported type.");
3053  if (paramsInfo.m_ProjectionBias != nullptr)
3054  {
3055  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
3056  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
3057  "are mismatched");
3058  }
3059  }
3060  if (descriptor.m_LayerNormEnabled)
3061  {
3062  if (!descriptor.m_CifgEnabled)
3063  {
3064  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
3065  reasonIfUnsupported,
3066  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
3067  "is not a supported type.");
3068  }
3069  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
3070  reasonIfUnsupported,
3071  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
3072  "is not a supported type.");
3073  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
3074  reasonIfUnsupported,
3075  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
3076  "is not a supported type.");
3077  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
3078  reasonIfUnsupported,
3079  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
3080  "is not a supported type.");
3081  }
3082 
3083  return supported;
3084 }
3085 
3086 } // namespace armnn
armnn::RefLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1790
armnn::BatchNormalizationDescriptor
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
Definition: Descriptors.hpp:828
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::RefLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1235
armnn::DataType::Boolean
@ Boolean
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::LayerType::Permute
@ Permute
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::LstmInputParamsInfo::GetCellBias
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
armnn::RefLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2233
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::LayerType::Splitter
@ Splitter
armnn::RefLayerSupport::IsReverseV2Supported
bool IsReverseV2Supported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2421
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1380
armnn::Optional
Definition: Optional.hpp:270
armnn::RefLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2264
armnn::IsQuantized8BitType
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:316
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu
armnn::RefLayerSupport::IsTileSupported
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2813
armnn::LstmInputParamsInfo::GetInputToCellWeights
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
armnn::Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Definition: Descriptors.hpp:431
armnn::RefLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1039
armnn::RefLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1512
armnn::BiasAndWeightsTypesMatch
Definition: LayerSupportRules.hpp:117
armnn::RefLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1304
armnn::ActivationFunction::SoftReLu
@ SoftReLu
armnn::Rule
Definition: LayerSupportRules.hpp:47
armnn::RefLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2203
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:985
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::RefLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
Default implementation of the ILayerSupport interface, Backends should implement this as a switch sta...
Definition: RefLayerSupport.cpp:61
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:965
armnn::RefLayerSupport::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1339
TypesUtils.hpp
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::ScatterNdDescriptor::m_InputEnabled
bool m_InputEnabled
Flag to show if input tensor is accepted.
Definition: Descriptors.hpp:1722
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::LayerType::Floor
@ Floor
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnn::RefLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:628
armnn::RefLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1665
armnn::LayerType::Transpose
@ Transpose
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::LayerType::Comparison
@ Comparison
armnn::RefLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1938
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:197
armnn::RefLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2842
armnn::LstmInputParamsInfo::GetProjectionBias
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ChannelShuffleDescriptor
A ChannelShuffleDescriptor for the ChannelShuffle operator.
Definition: Descriptors.hpp:1562
armnn::DataType::Float32
@ Float32
armnn::ActivationFunction::TanH
@ TanH
armnn::LstmInputParamsInfo::GetInputGateBias
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
armnn::LayerType::Tile
@ Tile
armnn::RefLayerSupport::IsRankSupported
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2323
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::LayerType::Stack
@ Stack
armnn::DataType::QSymmS8
@ QSymmS8
armnn::StackDescriptor
A StackDescriptor for the StackLayer.
Definition: Descriptors.hpp:1251
armnn::LstmInputParamsInfo::GetRecurrentToInputWeights
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
armnn::LstmInputParamsInfo::GetRecurrentToForgetWeights
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
IgnoreUnused.hpp
armnn::LayerType::Normalization
@ Normalization
RefLayerSupport.hpp
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::Reduce
@ Reduce
armnn::RefLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2652
armnn::RefLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1108
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::RefLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:697
armnn::LstmInputParamsInfo::GetRecurrentToCellWeights
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ActivationFunction::Gelu
@ Gelu
armnn::RefLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2742
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::RefLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2288
NumericCast.hpp
armnn::DataType::BFloat16
@ BFloat16
armnn::RefLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:665
armnn::ShapesAreSameRank
Definition: LayerSupportRules.hpp:137
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::RefLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2172
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
Definition: Descriptors.hpp:1192
armnn::RefLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:915
LayerSupportCommon.hpp
armnn::RefLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:742
armnn::LstmInputParamsInfo::GetInputLayerNormWeights
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
armnn::TypeNotPerAxisQuantized
Definition: LayerSupportRules.hpp:109
armnn::LayerType::Slice
@ Slice
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1148
armnn::RefLayerSupport::IsDilatedDepthwiseConvolutionSupported
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1371
armnn::RefLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2622
armnn::RefLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:560
armnn::DataType::Float16
@ Float16
armnn::RefLayerSupport::IsFakeQuantizationSupported
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1469
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::LayerSupportBase::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:120
armnn::RefLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1418
armnn::RefLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:949
armnn::LstmInputParamsInfo::GetCellToInputWeights
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
armnn::LstmInputParamsInfo::GetRecurrentToOutputWeights
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::ShapesAreBroadcastCompatible
Definition: LayerSupportRules.hpp:153
armnn::LstmInputParamsInfo::GetInputToInputWeights
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
armnn::RefLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2338
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::LayerType::ScatterNd
@ ScatterNd
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:526
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::Concat
@ Concat
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1196
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1490
armnn::RefLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2391
armnn::RefLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1206
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::SliceDescriptor
A SliceDescriptor for the SliceLayer.
Definition: Descriptors.hpp:1228
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerType::Debug
@ Debug
armnn::LayerType::Softmax
@ Softmax
armnn::RefLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:788
armnn::RefLayerSupport::IsBroadcastToSupported
bool IsBroadcastToSupported(const TensorInfo &input, const TensorInfo &output, const BroadcastToDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:823
armnn::ActivationFunction::Elu
@ Elu
armnn::RefLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2594
armnn::TensorNumDimensionsAreCorrect
Definition: LayerSupportRules.hpp:180
armnn::LstmInputParamsInfo::GetForgetGateBias
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1023
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:112
armnn::RefLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1702
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::LayerType::Quantize
@ Quantize
armnn::RefLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1901
armnn::LstmInputParamsInfo::GetCellToForgetWeights
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
armnn::ActivationFunction::Linear
@ Linear
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: LayerSupportBase.cpp:105
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn::LayerType::Multiplication
@ Multiplication
armnn::PermuteDescriptor
A PermuteDescriptor for the PermuteLayer.
Definition: Descriptors.hpp:149
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1584
armnn::LayerType::Addition
@ Addition
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
armnn::RefLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2367
armnn::Convolution3dDescriptor
A Convolution3dDescriptor for the Convolution3dLayer.
Definition: Descriptors.hpp:588
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::LayerType::BroadcastTo
@ BroadcastTo
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::RefLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:853
armnn::RefLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1534
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::LayerType::Division
@ Division
armnn::TypeIs
Definition: LayerSupportRules.hpp:101
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Shape
@ Shape
armnn::RefLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2136
armnn::RefLayerSupport::IsDebugSupported
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1176
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ActivationFunction::Abs
@ Abs
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::RefLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:999
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::ElementwiseUnaryDescriptor::m_Operation
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:145
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::RefLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: RefLayerSupport.cpp:1736
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::RefLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1381
armnn::BiasAndWeightsTypesCompatible
Definition: LayerSupportRules.hpp:125
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::RefLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2104
armnn::LayerType::Cast
@ Cast
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1190
armnn::LstmInputParamsInfo::GetInputToOutputWeights
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1146
armnn::RefLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2779
armnn::RefLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1487
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::LayerType::Reshape
@ Reshape
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LstmInputParamsInfo::GetOutputGateBias
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
armnn::LayerType::Fill
@ Fill
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1152
armnn::LstmInputParamsInfo::GetCellToOutputWeights
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::RefLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2067
armnn::LayerType::Minimum
@ Minimum
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::RefLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2714
armnn::LayerType::Map
@ Map
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::RefLayerSupport::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2001
armnn::RefLayerSupport::IsScatterNdSupported
bool IsScatterNdSupported(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &updates, const TensorInfo &output, const ScatterNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2460
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::LstmInputParamsInfo::GetOutputLayerNormWeights
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
armnn::ActivationFunction::ReLu
@ ReLu
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
armnn::RefLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:981
armnn::TypesAreEqual
Definition: LayerSupportRules.hpp:71
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::LayerType::Rank
@ Rank
armnn::RefLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:883
armnn::LayerType::Mean
@ Mean
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1150
armnn::RefLayerSupport::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2518
armnn::ActivationFunction::Square
@ Square
armnn::RefLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2910
armnn::LayerType::Input
@ Input
armnn::LayerType::Resize
@ Resize
armnn::DetectionPostProcessDescriptor
Definition: Descriptors.hpp:713
armnn::RefLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1019
armnn::RefLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1632
armnn::LstmInputParamsInfo::GetProjectionWeights
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
LayerSupportRules.hpp
armnn::RefLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2682
armnn::CheckSupportRule
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
Definition: LayerSupportRules.hpp:37
armnn::DataType::Signed64
@ Signed64
armnn::RefLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
Definition: RefLayerSupport.cpp:1764
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::Pooling2dDescriptor
A Pooling2dDescriptor for the Pooling2dLayer.
Definition: Descriptors.hpp:371
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::LayerType::Maximum
@ Maximum
armnn::BroadcastToDescriptor
Definition: Descriptors.hpp:1659
armnn::LayerType::Activation
@ Activation
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::TypeAnyOf
Definition: LayerSupportRules.hpp:89
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::Dequantize
@ Dequantize
armnn::ScatterNdDescriptor
A ScatterNdDescriptor for the ScatterNdLayer.
Definition: Descriptors.hpp:1679
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::ReduceDescriptor
A ReduceDescriptor for the REDUCE operators.
Definition: Descriptors.hpp:1538
armnn::RefLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2565
armnn::FakeQuantizationDescriptor
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
Definition: Descriptors.hpp:906
armnn::LstmInputParamsInfo::m_ProjectionBias
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
armnn::RefLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2536
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::LayerType::Unmap
@ Unmap
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::LstmInputParamsInfo::GetForgetLayerNormWeights
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
armnn::RefLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2142
armnn::LayerType::QLstm
@ QLstm
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::RefLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1601
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
armnn::RefLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2941
armnn::RefLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1671
armnn::LstmInputParamsInfo::GetCellLayerNormWeights
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::ShapesAreSameTotalSize
Definition: LayerSupportRules.hpp:145
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
armnn::TensorNumDimensionsAreGreaterOrEqualTo
Definition: LayerSupportRules.hpp:188
armnn::RefLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:2030
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
armnn::LstmInputParamsInfo::GetInputToForgetWeights
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
armnn::ActivationFunction::Sigmoid
@ Sigmoid