ArmNN
 25.02
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
BackendHelper.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017,2022-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 #include <armnn/BackendId.hpp>
10 #include <armnn/LstmParams.hpp>
12 #include <armnn/Tensor.hpp>
13 #include <armnn/Types.hpp>
16 
17 #include <stddef.h>
18 
19 namespace armnn
20 {
21 
22 // Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
24 {
26 
27  if (!backendRegistry.IsBackendRegistered(backend))
28  {
29  return LayerSupportHandle(nullptr);
30  }
31 
32  auto factoryFunc = backendRegistry.GetFactory(backend);
33  auto backendObject = factoryFunc();
34  return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
35 }
36 
37 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
38  const BackendCapabilities& capabilities)
39 {
40  for (size_t i=0; i < capabilities.GetOptionCount(); i++)
41  {
42  const auto& capability = capabilities.GetOption(i);
43  if (backendCapabilityName == capability.GetName())
44  {
45  return capability;
46  }
47  }
48  return EmptyOptional();
49 }
50 
51 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
52  const armnn::BackendId& backend)
53 {
54  auto const& backendRegistry = armnn::BackendRegistryInstance();
55  if (backendRegistry.IsBackendRegistered(backend))
56  {
57  auto factoryFunc = backendRegistry.GetFactory(backend);
58  auto backendObject = factoryFunc();
59  auto capabilities = backendObject->GetCapabilities();
60  return GetCapability(backendCapabilityName, capabilities);
61  }
62  return EmptyOptional();
63 }
64 
65 bool HasCapability(const std::string& name, const BackendCapabilities& capabilities)
66 {
67  return GetCapability(name, capabilities).has_value();
68 }
69 
70 bool HasCapability(const std::string& name, const armnn::BackendId& backend)
71 {
72  return GetCapability(name, backend).has_value();
73 }
74 
75 bool HasCapability(const BackendOptions::BackendOption& capability, const BackendCapabilities& capabilities)
76 {
77  return HasMatchingCapability(capability, capabilities);
78 }
79 
80 bool HasCapability(const BackendOptions::BackendOption& backendOption, const armnn::BackendId& backend)
81 {
82  return HasMatchingCapability(backendOption, backend);
83 }
84 
85 bool HasMatchingCapability(const BackendOptions::BackendOption& capability, const BackendCapabilities& capabilities)
86 {
87  for (size_t i=0; i < capabilities.GetOptionCount(); i++)
88  {
89  const auto& backendCapability = capabilities.GetOption(i);
90  if (capability.GetName() == backendCapability.GetName())
91  {
92  if (capability.GetValue().IsBool() && backendCapability.GetValue().IsBool())
93  {
94  return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
95  }
96  else if (capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
97  {
98  return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
99  }
100  else if (capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
101  {
102  return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
103  }
104  else if (capability.GetValue().IsString() && backendCapability.GetValue().IsString())
105  {
106  return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
107  }
108  else if (capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
109  {
110  return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
111  }
112  }
113  }
114  return false;
115 }
116 
117 bool HasMatchingCapability(const BackendOptions::BackendOption& backendOption, const armnn::BackendId& backend)
118 {
119  auto const& backendRegistry = armnn::BackendRegistryInstance();
120  if (backendRegistry.IsBackendRegistered(backend))
121  {
122  auto factoryFunc = backendRegistry.GetFactory(backend);
123  auto backendObject = factoryFunc();
124  auto capabilities = backendObject->GetCapabilities();
125  return HasMatchingCapability(backendOption, capabilities);
126  }
127  return false;
128 }
129 
130 unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend)
131 {
132  auto const& backendRegistry = armnn::BackendRegistryInstance();
133  if (backendRegistry.IsBackendRegistered(backend))
134  {
135  auto factoryFunc = backendRegistry.GetFactory(backend);
136  auto backendObject = factoryFunc();
137  return backendObject->GetNumberOfCacheFiles();
138  }
139  return 0;
140 }
141 
143 {
144  if (m_LayerSupport)
145  {
146  return true;
147  }
148 
149  return false;
150 }
151 
152 using TensorInfos = std::vector<TensorInfo>;
153 
155  const TensorInfo& output,
156  const ActivationDescriptor& descriptor,
157  Optional<std::string&> reasonIfUnsupported)
158 {
159  TensorInfos infos{input, output};
160 
161  return m_LayerSupport->IsLayerSupported(LayerType::Activation,
162  infos,
163  descriptor,
164  EmptyOptional(),
165  EmptyOptional(),
166  reasonIfUnsupported);
167 }
168 
170  const TensorInfo& input1,
171  const TensorInfo& output,
172  Optional<std::string&> reasonIfUnsupported)
173 {
174  TensorInfos infos{input0, input1, output};
175 
176  return m_LayerSupport->IsLayerSupported(LayerType::Addition,
177  infos,
178  BaseDescriptor(),
179  EmptyOptional(),
180  EmptyOptional(),
181  reasonIfUnsupported);
182 }
183 
185  const TensorInfo& output,
186  const ArgMinMaxDescriptor& descriptor,
187  Optional<std::string&> reasonIfUnsupported)
188 {
189  TensorInfos infos{input, output};
190 
191  return m_LayerSupport->IsLayerSupported(LayerType::ArgMinMax,
192  infos,
193  descriptor,
194  EmptyOptional(),
195  EmptyOptional(),
196  reasonIfUnsupported);
197 }
198 
200  const TensorInfo& input1,
201  const TensorInfo& output,
202  const BatchMatMulDescriptor& descriptor,
203  Optional<std::string&> reasonIfUnsupported)
204 {
205  TensorInfos infos{input0, input1, output};
206 
207  return m_LayerSupport->IsLayerSupported(LayerType::BatchMatMul,
208  infos,
209  descriptor,
210  EmptyOptional(),
211  EmptyOptional(),
212  reasonIfUnsupported);
213 }
214 
216  const TensorInfo& output,
217  const TensorInfo& mean,
218  const TensorInfo& var,
219  const TensorInfo& beta,
220  const TensorInfo& gamma,
221  const BatchNormalizationDescriptor& descriptor,
222  Optional<std::string&> reasonIfUnsupported)
223 {
224  TensorInfos infos{input, output, mean, var, beta, gamma};
225 
226  return m_LayerSupport->IsLayerSupported(LayerType::BatchNormalization,
227  infos,
228  descriptor,
229  EmptyOptional(),
230  EmptyOptional(),
231  reasonIfUnsupported);
232 }
233 
235  const TensorInfo& output,
236  const BatchToSpaceNdDescriptor& descriptor,
237  Optional<std::string&> reasonIfUnsupported)
238 {
239  TensorInfos infos{input, output};
240 
241  return m_LayerSupport->IsLayerSupported(LayerType::BatchToSpaceNd,
242  infos,
243  descriptor,
244  EmptyOptional(),
245  EmptyOptional(),
246  reasonIfUnsupported);
247 }
248 
249 
251  const TensorInfo& output,
252  const armnn::BroadcastToDescriptor& descriptor,
253  Optional<std::string&> reasonIfUnsupported)
254 {
255  TensorInfos infos{input, output};
256 
257  return m_LayerSupport->IsLayerSupported(LayerType::BroadcastTo,
258  infos,
259  descriptor,
260  EmptyOptional(),
261  EmptyOptional(),
262  reasonIfUnsupported.value());
263 }
264 
266  const TensorInfo& output,
267  Optional<std::string&> reasonIfUnsupported)
268 {
269  TensorInfos infos{input, output};
270 
271  return m_LayerSupport->IsLayerSupported(LayerType::Cast,
272  infos,
273  BaseDescriptor(),
274  EmptyOptional(),
275  EmptyOptional(),
276  reasonIfUnsupported);
277 }
278 
280  const TensorInfo &output,
281  const ChannelShuffleDescriptor &descriptor,
282  Optional<std::string &> reasonIfUnsupported)
283 {
284  TensorInfos infos{input, output};
285 
286  return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
287  infos,
288  descriptor,
289  EmptyOptional(),
290  EmptyOptional(),
291  reasonIfUnsupported);
292 }
293 
295  const TensorInfo& input1,
296  const TensorInfo& output,
297  const ComparisonDescriptor& descriptor,
298  Optional<std::string&> reasonIfUnsupported)
299 {
300  TensorInfos infos{input0, input1, output};
301 
302  return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
303  infos,
304  descriptor,
305  EmptyOptional(),
306  EmptyOptional(),
307  reasonIfUnsupported);
308 }
309 
310 bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
311  const TensorInfo& output,
312  const OriginsDescriptor& descriptor,
313  Optional<std::string&> reasonIfUnsupported)
314 {
315  TensorInfos infos;
316  for (const TensorInfo* inputInfo : inputs)
317  {
318  infos.push_back(*inputInfo);
319  }
320  infos.push_back(output);
321 
322  return m_LayerSupport->IsLayerSupported(LayerType::Concat,
323  infos,
324  descriptor,
325  EmptyOptional(),
326  EmptyOptional(),
327  reasonIfUnsupported);
328 }
329 
331  Optional<std::string&> reasonIfUnsupported)
332 {
333  TensorInfos infos{output};
334 
335  return m_LayerSupport->IsLayerSupported(LayerType::Constant,
336  infos,
337  BaseDescriptor(),
338  EmptyOptional(),
339  EmptyOptional(),
340  reasonIfUnsupported);
341 }
342 
344  const TensorInfo& output,
345  Optional<std::string&> reasonIfUnsupported)
346 {
347  TensorInfos infos{input, output};
348 
349  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
350  infos,
351  BaseDescriptor(),
352  EmptyOptional(),
353  EmptyOptional(),
354  reasonIfUnsupported);
355 }
356 
358  const TensorInfo& output,
359  Optional<std::string&> reasonIfUnsupported)
360 {
361  TensorInfos infos{input, output};
362 
363  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
364  infos,
365  BaseDescriptor(),
366  EmptyOptional(),
367  EmptyOptional(),
368  reasonIfUnsupported);
369 }
370 
372  const TensorInfo& output,
373  const Convolution2dDescriptor& descriptor,
374  const TensorInfo& weights,
375  const Optional<TensorInfo>& biases,
376  Optional<std::string&> reasonIfUnsupported)
377 {
378  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
379  TensorInfos infos{input, output, weights, biasesVal};
380 
382  if (!m_BackendId.IsUndefined())
383  {
384  capability = GetCapability("NonConstWeights", m_BackendId);
385  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
386  {
387  if (!weights.IsConstant())
388  {
389  if (reasonIfUnsupported.has_value())
390  {
391  reasonIfUnsupported.value() =
392  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
393  "Convolution2d weights are set as dynamic (non constant). ";
394  }
395  return false;
396  }
397  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
398  {
399  if (reasonIfUnsupported.has_value())
400  {
401  reasonIfUnsupported.value() =
402  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
403  "Convolution2d biases are set as dynamic (non constant). ";
404  }
405  return false;
406  }
407 
408  // At the first stage we will only print a warning. this is to give
409  // backend developers a chance to adopt and read weights from input slots.
410  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
411  "If you are a backend developer please find more information in our "
412  "doxygen documentation on github https://github.com/ARM-software/armnn "
413  "under the keyword 'ConstTensorsAsInputs'.";
414  }
415  }
416 
417  return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
418  infos,
419  descriptor,
420  EmptyOptional(),
421  EmptyOptional(),
422  reasonIfUnsupported);
423 }
424 
426  const TensorInfo& output,
427  const Convolution3dDescriptor& descriptor,
428  const TensorInfo& weights,
429  const Optional<TensorInfo>& biases,
430  Optional<std::string&> reasonIfUnsupported)
431 {
432  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
433  TensorInfos infos{input, output, weights, biasesVal};
434 
435  return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
436  infos,
437  descriptor,
438  EmptyOptional(),
439  EmptyOptional(),
440  reasonIfUnsupported);
441 }
442 
444  const TensorInfo& output,
445  Optional<std::string&> reasonIfUnsupported)
446 {
447  TensorInfos infos{input, output};
448 
449  return m_LayerSupport->IsLayerSupported(LayerType::Debug,
450  infos,
451  BaseDescriptor(),
452  EmptyOptional(),
453  EmptyOptional(),
454  reasonIfUnsupported);
455 }
456 
458  const TensorInfo& output,
459  const DepthToSpaceDescriptor& descriptor,
460  Optional<std::string&> reasonIfUnsupported)
461 {
462  TensorInfos infos{input, output};
463 
464  return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
465  infos,
466  descriptor,
467  EmptyOptional(),
468  EmptyOptional(),
469  reasonIfUnsupported);
470 }
471 
473  const TensorInfo& input,
474  const TensorInfo& output,
475  const DepthwiseConvolution2dDescriptor& descriptor,
476  const TensorInfo& weights,
477  const Optional<TensorInfo>& biases,
478  Optional<std::string&> reasonIfUnsupported)
479 {
480  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
481  TensorInfos infos{input, output, weights, biasesVal};
482 
484  if (!m_BackendId.IsUndefined())
485  {
486  capability = GetCapability("NonConstWeights", m_BackendId);
487  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
488  {
489  if (!weights.IsConstant())
490  {
491  if (reasonIfUnsupported.has_value())
492  {
493  reasonIfUnsupported.value() =
494  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
495  "DepthwiseConvolution2d weights are set as dynamic (non constant). ";
496  }
497  return false;
498  }
499  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
500  {
501  if (reasonIfUnsupported.has_value())
502  {
503  reasonIfUnsupported.value() =
504  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
505  "DepthwiseConvolution2d biases are set as dynamic (non constant). ";
506  }
507  return false;
508  }
509  // At the first stage we will only print a warning. this is to give
510  // backend developers a chance to adopt and read weights from input slots.
511  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
512  "If you are a backend developer please find more information in our "
513  "doxygen documentation on github https://github.com/ARM-software/armnn "
514  "under the keyword 'ConstTensorsAsInputs'.";
515  }
516  }
517 
518  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
519  infos,
520  descriptor,
521  EmptyOptional(),
522  EmptyOptional(),
523  reasonIfUnsupported);
524 }
525 
527  const TensorInfo& output,
528  Optional<std::string&> reasonIfUnsupported)
529 {
530  TensorInfos infos{input, output};
531 
532  return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
533  infos,
534  BaseDescriptor(),
535  EmptyOptional(),
536  EmptyOptional(),
537  reasonIfUnsupported);
538 }
539 
541  const TensorInfo& scores,
542  const TensorInfo& anchors,
543  const TensorInfo& detectionBoxes,
544  const TensorInfo& detectionClasses,
545  const TensorInfo& detectionScores,
546  const TensorInfo& numDetections,
547  const DetectionPostProcessDescriptor& descriptor,
548  Optional<std::string&> reasonIfUnsupported)
549 {
550  TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};
551 
552  return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
553  infos,
554  descriptor,
555  EmptyOptional(),
556  EmptyOptional(),
557  reasonIfUnsupported);
558 }
559 
561  const TensorInfo& input,
562  const TensorInfo& output,
563  const DepthwiseConvolution2dDescriptor& descriptor,
564  const TensorInfo& weights,
565  const Optional<TensorInfo>& biases,
566  Optional<std::string&> reasonIfUnsupported)
567 {
568  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
569  TensorInfos infos{input, output, weights, biasesVal};
570 
572  if (!m_BackendId.IsUndefined())
573  {
574  capability = GetCapability("NonConstWeights", m_BackendId);
575  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
576  {
577  if (!weights.IsConstant())
578  {
579  if (reasonIfUnsupported.has_value())
580  {
581  reasonIfUnsupported.value() =
582  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
583  "DilatedDepthwiseConvolution2d weights are set as dynamic (non constant). ";
584  }
585  return false;
586  }
587  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
588  {
589  if (reasonIfUnsupported.has_value())
590  {
591  reasonIfUnsupported.value() =
592  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
593  "DilatedDepthwiseConvolution2d biases are set as dynamic (non constant). ";
594  }
595  return false;
596  }
597  // At the first stage we will only print a warning. this is to give
598  // backend developers a chance to adopt and read weights from input slots.
599  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
600  "If you are a backend developer please find more information in our "
601  "doxygen documentation on github https://github.com/ARM-software/armnn "
602  "under the keyword 'ConstTensorsAsInputs'.";
603  }
604  }
605 
606  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
607  infos,
608  descriptor,
609  EmptyOptional(),
610  EmptyOptional(),
611  reasonIfUnsupported);
612 }
613 
615  const TensorInfo& input1,
616  const TensorInfo& output,
617  Optional<std::string&> reasonIfUnsupported)
618 {
619  TensorInfos infos{input0, input1, output};
620 
621  return m_LayerSupport->IsLayerSupported(LayerType::Division,
622  infos,
623  BaseDescriptor(),
624  EmptyOptional(),
625  EmptyOptional(),
626  reasonIfUnsupported);
627 }
628 
630  const TensorInfo &input1,
631  const TensorInfo &output,
632  const ElementwiseBinaryDescriptor &descriptor,
633  Optional<std::string &> reasonIfUnsupported)
634 {
635  TensorInfos infos{input0, input1, output};
636 
637  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
638  infos,
639  descriptor,
640  EmptyOptional(),
641  EmptyOptional(),
642  reasonIfUnsupported);
643 }
644 
646  const TensorInfo& output,
647  const ElementwiseUnaryDescriptor& descriptor,
648  Optional<std::string&> reasonIfUnsupported)
649 {
650  TensorInfos infos{input, output};
651 
652  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
653  infos,
654  descriptor,
655  EmptyOptional(),
656  EmptyOptional(),
657  reasonIfUnsupported);
658 }
659 
661  const FakeQuantizationDescriptor& descriptor,
662  Optional<std::string&> reasonIfUnsupported)
663 {
664  TensorInfos infos{input};
665 
666  return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
667  infos,
668  descriptor,
669  EmptyOptional(),
670  EmptyOptional(),
671  reasonIfUnsupported);
672 }
673 
675  const TensorInfo& output,
676  const FillDescriptor& descriptor,
677  Optional<std::string&> reasonIfUnsupported)
678 {
679  TensorInfos infos{input, output};
680 
681  return m_LayerSupport->IsLayerSupported(LayerType::Fill,
682  infos,
683  descriptor,
684  EmptyOptional(),
685  EmptyOptional(),
686  reasonIfUnsupported);
687 }
688 
690  const TensorInfo& output,
691  Optional<std::string&> reasonIfUnsupported)
692 {
693  TensorInfos infos{input, output};
694 
695  return m_LayerSupport->IsLayerSupported(LayerType::Floor,
696  infos,
697  BaseDescriptor(),
698  EmptyOptional(),
699  EmptyOptional(),
700  reasonIfUnsupported);
701 }
702 
704  const TensorInfo& output,
705  const TensorInfo& weights,
706  const TensorInfo& biases,
707  const FullyConnectedDescriptor& descriptor,
708  Optional<std::string&> reasonIfUnsupported)
709 {
710  TensorInfos infos{input, output, weights, biases};
711 
713  if (!m_BackendId.IsUndefined())
714  {
715  capability = GetCapability("NonConstWeights", m_BackendId);
716  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
717  {
718  if (!descriptor.m_ConstantWeights)
719  {
720  if (reasonIfUnsupported.has_value())
721  {
722  reasonIfUnsupported.value() =
723  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
724  "FullyConnected descriptor indicates that weights are dynamic (non constant). ";
725  }
726  return false;
727  }
728  if (!weights.IsConstant())
729  {
730  if (reasonIfUnsupported.has_value())
731  {
732  reasonIfUnsupported.value() =
733  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
734  "FullyConnected weights are set as dynamic (non constant). ";
735  }
736 
737  return false;
738  }
739  if (descriptor.m_BiasEnabled && !biases.IsConstant())
740  {
741  if (reasonIfUnsupported.has_value())
742  {
743  reasonIfUnsupported.value() =
744  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
745  "FullyConnected biases are set as dynamic (non constant). ";
746  }
747  return false;
748  }
749 
750  // At the first stage we will only print a warning. this is to give
751  // backend developers a chance to adopt and read weights from input slots.
752  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
753  "If you are a backend developer please find more information in our "
754  "doxygen documentation on github https://github.com/ARM-software/armnn "
755  "under the keyword 'ConstTensorsAsInputs'.";
756  }
757  }
758 
759  return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
760  infos,
761  descriptor,
762  EmptyOptional(),
763  EmptyOptional(),
764  reasonIfUnsupported);
765 }
766 
767 bool LayerSupportHandle::IsFusedSupported(const std::vector<std::reference_wrapper<TensorInfo>>& inputs,
768  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
769  const FusedDescriptor& descriptor,
770  Optional<std::string&> reasonIfUnsupported)
771 {
772  TensorInfos infos;
773  infos.reserve(inputs.size() + outputs.size());
774  for (TensorInfo inInfo : inputs)
775  {
776  infos.emplace_back(inInfo);
777  }
778  for (TensorInfo outInfo : outputs)
779  {
780  infos.emplace_back(outInfo);
781  }
782 
783  return m_LayerSupport->IsLayerSupported(LayerType::Fused,
784  infos,
785  descriptor,
786  EmptyOptional(),
787  EmptyOptional(),
788  reasonIfUnsupported);
789 }
790 
792  const TensorInfo& input1,
793  const TensorInfo& output,
794  const GatherDescriptor& descriptor,
795  Optional<std::string&> reasonIfUnsupported)
796 {
797  TensorInfos infos{input0, input1, output};
798 
799  return m_LayerSupport->IsLayerSupported(LayerType::Gather,
800  infos,
801  descriptor,
802  EmptyOptional(),
803  EmptyOptional(),
804  reasonIfUnsupported);
805 }
806 
808  const TensorInfo& input1,
809  const TensorInfo& output,
810  Optional<std::string&> reasonIfUnsupported)
811 {
812  TensorInfos infos{input0, input1, output};
813 
814  return m_LayerSupport->IsLayerSupported(LayerType::GatherNd,
815  infos,
816  BaseDescriptor(),
817  EmptyOptional(),
818  EmptyOptional(),
819  reasonIfUnsupported);
820 }
821 
823  Optional<std::string&> reasonIfUnsupported)
824 {
825  TensorInfos infos{input};
826 
827  return m_LayerSupport->IsLayerSupported(LayerType::Input,
828  infos,
829  BaseDescriptor(),
830  EmptyOptional(),
831  EmptyOptional(),
832  reasonIfUnsupported);
833 }
834 
836  const TensorInfo& input,
837  const TensorInfo& output,
838  const InstanceNormalizationDescriptor& descriptor,
839  Optional<std::string&> reasonIfUnsupported)
840 {
841  TensorInfos infos{input, output};
842 
843  return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
844  infos,
845  descriptor,
846  EmptyOptional(),
847  EmptyOptional(),
848  reasonIfUnsupported);
849 }
850 
852  const TensorInfo& output,
853  const L2NormalizationDescriptor& descriptor,
854  Optional<std::string&> reasonIfUnsupported)
855 {
856  TensorInfos infos{input, output};
857 
858  return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
859  infos,
860  descriptor,
861  EmptyOptional(),
862  EmptyOptional(),
863  reasonIfUnsupported);
864 }
865 
867  const TensorInfo& input1,
868  const TensorInfo& output,
869  const LogicalBinaryDescriptor& descriptor,
870  Optional<std::string&> reasonIfUnsupported)
871 {
872  TensorInfos infos{input0, input1, output};
873 
874  return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
875  infos,
876  descriptor,
877  EmptyOptional(),
878  EmptyOptional(),
879  reasonIfUnsupported);
880 }
881 
883  const TensorInfo& output,
884  const ElementwiseUnaryDescriptor& descriptor,
885  Optional<std::string&> reasonIfUnsupported)
886 {
887  TensorInfos infos{input, output};
888 
889  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
890  infos,
891  descriptor,
892  EmptyOptional(),
893  EmptyOptional(),
894  reasonIfUnsupported);
895 }
896 
898  const TensorInfo& output,
899  const LogSoftmaxDescriptor& descriptor,
900  Optional<std::string&> reasonIfUnsupported)
901 {
902  TensorInfos infos{input, output};
903 
904  return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
905  infos,
906  descriptor,
907  EmptyOptional(),
908  EmptyOptional(),
909  reasonIfUnsupported);
910 }
911 
913  const TensorInfo& outputStateIn,
914  const TensorInfo& cellStateIn,
915  const TensorInfo& scratchBuffer,
916  const TensorInfo& outputStateOut,
917  const TensorInfo& cellStateOut,
918  const TensorInfo& output,
919  const LstmDescriptor& descriptor,
920  const LstmInputParamsInfo& paramsInfo,
921  Optional<std::string&> reasonIfUnsupported)
922 {
923  TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
924 
925  return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
926  infos,
927  descriptor,
928  paramsInfo,
929  EmptyOptional(),
930  reasonIfUnsupported);
931 }
932 
934  const TensorInfo& input1,
935  const TensorInfo& output,
936  Optional<std::string&> reasonIfUnsupported)
937 {
938  TensorInfos infos{input0, input1, output};
939 
940  return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
941  infos,
942  BaseDescriptor(),
943  EmptyOptional(),
944  EmptyOptional(),
945  reasonIfUnsupported);
946 }
947 
949  const TensorInfo& output,
950  const MeanDescriptor& descriptor,
951  Optional<std::string&> reasonIfUnsupported)
952 {
953  TensorInfos infos{input, output};
954 
955  return m_LayerSupport->IsLayerSupported(LayerType::Mean,
956  infos,
957  descriptor,
958  EmptyOptional(),
959  EmptyOptional(),
960  reasonIfUnsupported);
961 }
962 
964  const TensorInfo& output,
965  Optional<std::string&> reasonIfUnsupported)
966 {
967  TensorInfos infos{input, output};
968 
969  return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
970  infos,
971  BaseDescriptor(),
972  EmptyOptional(),
973  EmptyOptional(),
974  reasonIfUnsupported);
975 }
976 
978  const TensorInfo& output,
979  Optional<std::string&> reasonIfUnsupported)
980 {
981  TensorInfos infos{input, output};
982 
983  return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
984  infos,
985  BaseDescriptor(),
986  EmptyOptional(),
987  EmptyOptional(),
988  reasonIfUnsupported);
989 }
990 
992  const TensorInfo& input1,
993  const TensorInfo& output,
994  Optional<std::string&> reasonIfUnsupported)
995 {
996  TensorInfos infos{input0, input1, output};
997 
998  return m_LayerSupport->IsLayerSupported(LayerType::Merge,
999  infos,
1000  BaseDescriptor(),
1001  EmptyOptional(),
1002  EmptyOptional(),
1003  reasonIfUnsupported);
1004 }
1005 
1007  const TensorInfo& input1,
1008  const TensorInfo& output,
1009  Optional<std::string&> reasonIfUnsupported)
1010 {
1011  TensorInfos infos{input0, input1, output};
1012 
1013  return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
1014  infos,
1015  BaseDescriptor(),
1016  EmptyOptional(),
1017  EmptyOptional(),
1018  reasonIfUnsupported);
1019 }
1020 
1022  const TensorInfo& input1,
1023  const TensorInfo& output,
1024  Optional<std::string&> reasonIfUnsupported)
1025 {
1026  TensorInfos infos{input0, input1, output};
1027 
1028  return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
1029  infos,
1030  BaseDescriptor(),
1031  EmptyOptional(),
1032  EmptyOptional(),
1033  reasonIfUnsupported);
1034 }
1035 
1037  const TensorInfo& output,
1038  const NormalizationDescriptor& descriptor,
1039  Optional<std::string&> reasonIfUnsupported)
1040 {
1041  TensorInfos infos{input, output};
1042 
1043  return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
1044  infos,
1045  descriptor,
1046  EmptyOptional(),
1047  EmptyOptional(),
1048  reasonIfUnsupported);
1049 }
1050 
1052  Optional<std::string&> reasonIfUnsupported)
1053 {
1054  TensorInfos infos{output};
1055 
1056  return m_LayerSupport->IsLayerSupported(LayerType::Output,
1057  infos,
1058  BaseDescriptor(),
1059  EmptyOptional(),
1060  EmptyOptional(),
1061  reasonIfUnsupported);
1062 }
1063 
1065  const TensorInfo& output,
1066  const PadDescriptor& descriptor,
1067  Optional<std::string&> reasonIfUnsupported)
1068 {
1069  TensorInfos infos{input, output};
1070 
1071  return m_LayerSupport->IsLayerSupported(LayerType::Pad,
1072  infos,
1073  descriptor,
1074  EmptyOptional(),
1075  EmptyOptional(),
1076  reasonIfUnsupported);
1077 }
1078 
1080  const TensorInfo& output,
1081  const PermuteDescriptor& descriptor,
1082  Optional<std::string&> reasonIfUnsupported)
1083 {
1084  TensorInfos infos{input, output};
1085 
1086  return m_LayerSupport->IsLayerSupported(LayerType::Permute,
1087  infos,
1088  descriptor,
1089  EmptyOptional(),
1090  EmptyOptional(),
1091  reasonIfUnsupported);
1092 }
1093 
1095  const TensorInfo& output,
1096  const Pooling2dDescriptor& descriptor,
1097  Optional<std::string&> reasonIfUnsupported)
1098 {
1099  TensorInfos infos{input, output};
1100 
1101  return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
1102  infos,
1103  descriptor,
1104  EmptyOptional(),
1105  EmptyOptional(),
1106  reasonIfUnsupported);
1107 }
1108 
1110  const TensorInfo& output,
1111  const Pooling3dDescriptor& descriptor,
1112  Optional<std::string&> reasonIfUnsupported)
1113 {
1114  TensorInfos infos{input, output};
1115 
1116  return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
1117  infos,
1118  descriptor,
1119  EmptyOptional(),
1120  EmptyOptional(),
1121  reasonIfUnsupported);
1122 }
1123 
1125  const PreCompiledDescriptor& descriptor,
1126  Optional<std::string&> reasonIfUnsupported)
1127 {
1128  TensorInfos infos{input};
1129 
1130  return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
1131  infos,
1132  descriptor,
1133  EmptyOptional(),
1134  EmptyOptional(),
1135  reasonIfUnsupported);
1136 }
1137 
1139  const TensorInfo& alpha,
1140  const TensorInfo& output,
1141  Optional<std::string&> reasonIfUnsupported)
1142 {
1143  TensorInfos infos{input, alpha, output};
1144 
1145  return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
1146  infos,
1147  BaseDescriptor(),
1148  EmptyOptional(),
1149  EmptyOptional(),
1150  reasonIfUnsupported);
1151 }
1152 
1154  const TensorInfo& output,
1155  Optional<std::string&> reasonIfUnsupported)
1156 {
1157  TensorInfos infos{input, output};
1158 
1159  return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
1160  infos,
1161  BaseDescriptor(),
1162  EmptyOptional(),
1163  EmptyOptional(),
1164  reasonIfUnsupported);
1165 }
1166 
1168  const TensorInfo& previousOutputIn,
1169  const TensorInfo& previousCellStateIn,
1170  const TensorInfo& outputStateOut,
1171  const TensorInfo& cellStateOut,
1172  const TensorInfo& output,
1173  const QLstmDescriptor& descriptor,
1174  const LstmInputParamsInfo& paramsInfo,
1175  Optional<std::string&> reasonIfUnsupported)
1176 {
1177  TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
1178 
1179  return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
1180  infos,
1181  descriptor,
1182  paramsInfo,
1183  EmptyOptional(),
1184  reasonIfUnsupported);
1185 }
1186 
1188  const TensorInfo& previousCellStateIn,
1189  const TensorInfo& previousOutputIn,
1190  const TensorInfo& cellStateOut,
1191  const TensorInfo& output,
1192  const QuantizedLstmInputParamsInfo& paramsInfo,
1193  Optional<std::string&> reasonIfUnsupported)
1194 {
1195  TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
1196 
1197  return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
1198  infos,
1199  BaseDescriptor(),
1200  EmptyOptional(),
1201  paramsInfo,
1202  reasonIfUnsupported);
1203 }
1204 
1206  const TensorInfo& output,
1207  Optional<std::string&> reasonIfUnsupported)
1208 {
1209  TensorInfos infos{input, output};
1210 
1211  return m_LayerSupport->IsLayerSupported(LayerType::Rank,
1212  infos,
1213  BaseDescriptor(),
1214  EmptyOptional(),
1215  EmptyOptional(),
1216  reasonIfUnsupported);
1217 }
1218 
1220  const TensorInfo& output,
1221  const ReduceDescriptor& descriptor,
1222  Optional<std::string&> reasonIfUnsupported)
1223 {
1224  TensorInfos infos{input, output};
1225 
1226  return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
1227  infos,
1228  descriptor,
1229  EmptyOptional(),
1230  EmptyOptional(),
1231  reasonIfUnsupported);
1232 }
1233 
1235  const TensorInfo& output,
1236  const ReshapeDescriptor& descriptor,
1237  Optional<std::string&> reasonIfUnsupported)
1238 {
1239  TensorInfos infos{input, output};
1240 
1241  return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
1242  infos,
1243  descriptor,
1244  EmptyOptional(),
1245  EmptyOptional(),
1246  reasonIfUnsupported);
1247 }
1248 
1250  const TensorInfo& output,
1251  const ResizeDescriptor& descriptor,
1252  Optional<std::string&> reasonIfUnsupported)
1253 {
1254  TensorInfos infos{input, output};
1255 
1256  return m_LayerSupport->IsLayerSupported(LayerType::Resize,
1257  infos,
1258  descriptor,
1259  EmptyOptional(),
1260  EmptyOptional(),
1261  reasonIfUnsupported);
1262 }
1263 
1265  const armnn::TensorInfo &input1,
1266  const armnn::TensorInfo &output,
1267  Optional<std::string &> reasonIfUnsupported)
1268 {
1269  TensorInfos infos{input0, input1, output};
1270 
1271  return m_LayerSupport->IsLayerSupported(LayerType::ReverseV2,
1272  infos,
1273  BaseDescriptor(),
1274  EmptyOptional(),
1275  EmptyOptional(),
1276  reasonIfUnsupported);
1277 }
1278 
1280  const TensorInfo& indices,
1281  const TensorInfo& updates,
1282  const TensorInfo& output,
1283  const armnn::ScatterNdDescriptor &descriptor,
1284  Optional<std::string&> reasonIfUnsupported)
1285 {
1286  TensorInfos infos{input, indices, updates, output};
1287 
1288  return m_LayerSupport->IsLayerSupported(LayerType::ScatterNd,
1289  infos,
1290  descriptor,
1291  EmptyOptional(),
1292  EmptyOptional(),
1293  reasonIfUnsupported);
1294 }
1295 
1297  const TensorInfo& output,
1298  Optional<std::string&> reasonIfUnsupported)
1299 {
1300  TensorInfos infos{input, output};
1301 
1302  return m_LayerSupport->IsLayerSupported(LayerType::Shape,
1303  infos,
1304  BaseDescriptor(),
1305  EmptyOptional(),
1306  EmptyOptional(),
1307  reasonIfUnsupported);
1308 }
1309 
1311  const TensorInfo& output,
1312  const SliceDescriptor& descriptor,
1313  Optional<std::string&> reasonIfUnsupported)
1314 {
1315  TensorInfos infos{input, output};
1316 
1317  return m_LayerSupport->IsLayerSupported(LayerType::Slice,
1318  infos,
1319  descriptor,
1320  EmptyOptional(),
1321  EmptyOptional(),
1322  reasonIfUnsupported);
1323 }
1324 
1326  const TensorInfo& output,
1327  const SoftmaxDescriptor& descriptor,
1328  Optional<std::string&> reasonIfUnsupported)
1329 {
1330  TensorInfos infos{input, output};
1331 
1332  return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
1333  infos,
1334  descriptor,
1335  EmptyOptional(),
1336  EmptyOptional(),
1337  reasonIfUnsupported);
1338 }
1339 
1341  const TensorInfo& output,
1342  const SpaceToBatchNdDescriptor& descriptor,
1343  Optional<std::string&> reasonIfUnsupported)
1344 {
1345  TensorInfos infos{input, output};
1346 
1347  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
1348  infos,
1349  descriptor,
1350  EmptyOptional(),
1351  EmptyOptional(),
1352  reasonIfUnsupported);
1353 }
1354 
1356  const TensorInfo& output,
1357  const SpaceToDepthDescriptor& descriptor,
1358  Optional<std::string&> reasonIfUnsupported)
1359 {
1360  TensorInfos infos{input, output};
1361 
1362  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
1363  infos,
1364  descriptor,
1365  EmptyOptional(),
1366  EmptyOptional(),
1367  reasonIfUnsupported);
1368 }
1369 
1371  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1372  const ViewsDescriptor& descriptor,
1373  Optional<std::string&> reasonIfUnsupported)
1374 {
1375  TensorInfos infos{input};
1376  for (TensorInfo outInfo : outputs)
1377  {
1378  infos.push_back(outInfo);
1379  }
1380 
1381  return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
1382  infos,
1383  descriptor,
1384  EmptyOptional(),
1385  EmptyOptional(),
1386  reasonIfUnsupported);
1387 }
1388 
1389 bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1390  const TensorInfo& output,
1391  const StackDescriptor& descriptor,
1392  Optional<std::string&> reasonIfUnsupported)
1393 {
1394  TensorInfos infos;
1395  for (const TensorInfo* inputInfo : inputs)
1396  {
1397  infos.push_back(*inputInfo);
1398  }
1399  infos.push_back(output);
1400 
1401  return m_LayerSupport->IsLayerSupported(LayerType::Stack,
1402  infos,
1403  descriptor,
1404  EmptyOptional(),
1405  EmptyOptional(),
1406  reasonIfUnsupported);
1407 }
1408 
1409 bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
1410  const std::vector<const TensorInfo*>& outputs,
1411  const StandInDescriptor& descriptor,
1412  Optional<std::string&> reasonIfUnsupported)
1413 {
1414  TensorInfos infos;
1415  for (const TensorInfo* inputInfo : inputs)
1416  {
1417  infos.push_back(*inputInfo);
1418  }
1419  for (const TensorInfo* outputInfo : outputs)
1420  {
1421  infos.push_back(*outputInfo);
1422  }
1423 
1424  return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
1425  infos,
1426  descriptor,
1427  EmptyOptional(),
1428  EmptyOptional(),
1429  reasonIfUnsupported);
1430 }
1431 
1432 
1434  const TensorInfo& output,
1435  const StridedSliceDescriptor& descriptor,
1436  Optional<std::string&> reasonIfUnsupported)
1437 {
1438  TensorInfos infos{input, output};
1439 
1440  return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
1441  infos,
1442  descriptor,
1443  EmptyOptional(),
1444  EmptyOptional(),
1445  reasonIfUnsupported);
1446 }
1447 
1449  const TensorInfo& input1,
1450  const TensorInfo& output,
1451  Optional<std::string&> reasonIfUnsupported)
1452 {
1453  TensorInfos infos{input0, input1, output};
1454 
1455  return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
1456  infos,
1457  BaseDescriptor(),
1458  EmptyOptional(),
1459  EmptyOptional(),
1460  reasonIfUnsupported);
1461 }
1462 
1464  const TensorInfo& input1,
1465  const TensorInfo& output0,
1466  const TensorInfo& output1,
1467  Optional<std::string&> reasonIfUnsupported)
1468 {
1469  TensorInfos infos{input0, input1, output0, output1};
1470 
1471  return m_LayerSupport->IsLayerSupported(LayerType::Switch,
1472  infos,
1473  BaseDescriptor(),
1474  EmptyOptional(),
1475  EmptyOptional(),
1476  reasonIfUnsupported);
1477 }
1478 
1480  const TensorInfo& output,
1481  const armnn::TileDescriptor &descriptor,
1482  Optional<std::string&> reasonIfUnsupported)
1483 {
1484  TensorInfos infos{input, output};
1485 
1486  return m_LayerSupport->IsLayerSupported(LayerType::Tile,
1487  infos,
1488  descriptor,
1489  EmptyOptional(),
1490  EmptyOptional(),
1491  reasonIfUnsupported);
1492 }
1493 
1495  const TensorInfo& input,
1496  const TensorInfo& output,
1497  const TransposeConvolution2dDescriptor& descriptor,
1498  const TensorInfo& weights,
1499  const Optional<TensorInfo>& biases,
1500  Optional<std::string&> reasonIfUnsupported)
1501 {
1502  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
1503  TensorInfos infos{input, output, weights, biasesVal};
1504 
1505  return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
1506  infos,
1507  descriptor,
1508  EmptyOptional(),
1509  EmptyOptional(),
1510  reasonIfUnsupported);
1511 }
1512 
1514  const TensorInfo& output,
1515  const TransposeDescriptor& descriptor,
1516  Optional<std::string&> reasonIfUnsupported)
1517 {
1518  TensorInfos infos{input, output};
1519 
1520  return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
1521  infos,
1522  descriptor,
1523  EmptyOptional(),
1524  EmptyOptional(),
1525  reasonIfUnsupported);
1526 }
1527 
1529  const TensorInfo& outputStateIn,
1530  const TensorInfo& cellStateIn,
1531  const TensorInfo& outputStateOut,
1532  const TensorInfo& cellStateOut,
1533  const TensorInfo& output,
1534  const LstmDescriptor& descriptor,
1535  const LstmInputParamsInfo& paramsInfo,
1536  Optional<std::string&> reasonIfUnsupported)
1537 {
1538  TensorInfos infos{input, outputStateIn, cellStateIn, outputStateOut, cellStateOut, output};
1539 
1540  return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
1541  infos,
1542  descriptor,
1543  paramsInfo,
1544  EmptyOptional(),
1545  reasonIfUnsupported);
1546 }
1547 
1548 }
#define ARMNN_LOG(severity)
Definition: Logging.hpp:212
bool IsUndefined() const
Definition: BackendId.hpp:141
std::string AsString() const
unsigned int AsUnsignedInt() const
bool AsBool() const
Value getters.
bool IsBool() const
Type getters.
bool IsBackendRegistered(const BackendId &id) const
FactoryFunction GetFactory(const BackendId &id) const
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsStandInSupported(const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsLogicalUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsBroadcastToSupported(const TensorInfo &input, const TensorInfo &output, const BroadcastToDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported)
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsReverseV2Supported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsPreCompiledSupported(const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsScatterNdSupported(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &updates, const TensorInfo &output, const ScatterNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsElementwiseBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsSwitchSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsBatchMatMulSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsFusedSupported(const std::vector< std::reference_wrapper< TensorInfo >> &inputs, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const FusedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool has_value() const noexcept
Definition: Optional.hpp:53
bool IsConstant() const
Definition: Tensor.cpp:513
Copyright (c) 2021 ARM Limited and Contributors.
std::vector< TensorInfo > TensorInfos
bool HasMatchingCapability(const BackendOptions::BackendOption &capability, const BackendCapabilities &capabilities)
Convenience function to check if a given capability matches a capability in a BackendCapabilities struct.
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId &backend)
Convenience function to retrieve the ILayerSupportHandle for a backend.
Optional< const BackendOptions::BackendOption > GetCapability(const std::string &backendCapabilityName, const BackendCapabilities &capabilities)
Returns a BackendCapability if the backend lists the capability. The returned BackendCapability must then be inspected to determine whether it is supported.
bool HasCapability(const std::string &name, const BackendCapabilities &capabilities)
Convenience function to check if a capability exists in a BackendCapabilities struct.
BackendRegistry & BackendRegistryInstance()
unsigned int GetNumberOfCacheFiles(const armnn::BackendId &backend)
Returns the number of cached files if backend supports caching.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:37
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:68
Struct for the users to pass backend specific options.
const BackendOption & GetOption(size_t idx) const
size_t GetOptionCount() const noexcept
Base class for all descriptors.
Definition: Descriptors.hpp:23
A BatchMatMulDescriptor for the BatchMatMul operator.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
A ChannelShuffleDescriptor for the ChannelShuffle operator.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:90
A Convolution2dDescriptor for the Convolution2dLayer.
bool m_BiasEnabled
Enable/disable bias.
A Convolution3dDescriptor for the Convolution3dLayer.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
bool m_BiasEnabled
Enable/disable bias.
A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
A FillDescriptor for the FillLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_ConstantWeights
Enable/disable constant weights and biases.
bool m_BiasEnabled
Enable/disable bias.
A FusedDescriptor for the FusedLayer.
A GatherDescriptor for the GatherLayer.
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
A L2NormalizationDescriptor for the L2NormalizationLayer.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
An LstmDescriptor for the LstmLayer.
A MeanDescriptor for the MeanLayer.
A NormalizationDescriptor for the NormalizationLayer.
An OriginsDescriptor for the ConcatLayer.
A PadDescriptor for the PadLayer.
A PermuteDescriptor for the PermuteLayer.
A Pooling2dDescriptor for the Pooling2dLayer.
A Pooling3dDescriptor for the Pooling3dLayer.
A PreCompiledDescriptor for the PreCompiledLayer.
A QLstmDescriptor for the QLstmLayer.
A ReduceDescriptor for the REDUCE operators.
A ReshapeDescriptor for the ReshapeLayer.
A ResizeDescriptor for the ResizeLayer.
A ScatterNdDescriptor for the ScatterNdLayer.
A SliceDescriptor for the SliceLayer.
A SoftmaxDescriptor for the SoftmaxLayer.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
A StackDescriptor for the StackLayer.
A StandInDescriptor for the StandIn layer.
A StridedSliceDescriptor for the StridedSliceLayer.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
A TransposeDescriptor for the TransposeLayer.
A ViewsDescriptor for the SplitterLayer.