Arm NN 24.08 — WorkloadFactory.cpp (source listing extracted from the generated Doxygen documentation).
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
12 #include <armnn/BackendHelper.hpp>
16 
18 
19 #include <sstream>
20 
21 namespace armnn
22 {
23 
24 namespace
25 {
26 using LayerList = std::list<Layer*>;
27 using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
28 
29 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
30 {
31  if (!type)
32  {
33  return info;
34  }
35 
36  return TensorInfo(info.GetShape(),
37  type.value(),
38  info.GetQuantizationScale(),
39  info.GetQuantizationOffset(),
40  info.IsConstant());
41 }
42 
43 } // anonymous namespace
44 
46 {
47  if (!weightsType)
48  {
49  return weightsType;
50  }
51 
52  switch(weightsType.value())
53  {
57  return weightsType;
63  default:
64  throw InvalidArgumentException("GetBiasTypeFromWeightsType(): Unsupported data type.");
65  }
66  return armnn::EmptyOptional();
67 }
68 
69 
70 bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
71  const IConnectableLayer& connectableLayer,
72  Optional<DataType> dataType,
73  std::string& outReasonIfUnsupported,
74  const ModelOptions& modelOptions)
75 {
76  Optional<std::string&> reason = outReasonIfUnsupported;
77  bool result;
78  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
79 
80  auto const& backendRegistry = BackendRegistryInstance();
81  if (!backendRegistry.IsBackendRegistered(backendId))
82  {
83  std::stringstream ss;
84  ss << connectableLayer.GetName() << " is not supported on " << backendId
85  << " because this backend is not registered.";
86 
87  outReasonIfUnsupported = ss.str();
88  return false;
89  }
90 
91  auto backendFactory = backendRegistry.GetFactory(backendId);
92  auto backendObject = backendFactory();
93  auto layerSupport = backendObject->GetLayerSupport(modelOptions);
94  auto layerSupportObject = LayerSupportHandle(layerSupport, backendId);
95 
96  switch(layer.GetType())
97  {
99  {
100  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
101  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
102  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
103  result = layerSupportObject.IsActivationSupported(
104  OverrideDataType(input, dataType),
105  OverrideDataType(output, dataType),
106  cLayer->GetParameters(),
107  reason);
108  break;
109  }
110  case LayerType::Addition:
111  {
113  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
114  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
115  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
116  result = layerSupportObject.IsAdditionSupported(
117  OverrideDataType(input0, dataType),
118  OverrideDataType(input1, dataType),
119  OverrideDataType(output, dataType),
120  reason);
122  break;
123  }
125  {
126  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
127  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
128 
129  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
130  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
131  result = layerSupportObject.IsArgMinMaxSupported(
132  OverrideDataType(input, dataType),
133  OverrideDataType(output, DataType::Signed32),
134  descriptor,
135  reason);
136  break;
137  }
139  {
140  auto cLayer = PolymorphicDowncast<const BatchMatMulLayer*>(&layer);
141  const BatchMatMulDescriptor& descriptor = cLayer->GetParameters();
142 
143  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
144  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
145  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
146  result = layerSupportObject.IsBatchMatMulSupported(
147  OverrideDataType(input0, dataType),
148  OverrideDataType(input1, dataType),
149  OverrideDataType(output, dataType),
150  descriptor,
151  reason);
152  break;
153  }
155  {
156  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
157  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
158  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
159  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
160  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
161  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
162  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
163  result = layerSupportObject.IsBatchNormalizationSupported(
164  OverrideDataType(input, dataType),
165  OverrideDataType(output, dataType),
166  OverrideDataType(mean, dataType),
167  OverrideDataType(var, dataType),
168  OverrideDataType(beta, dataType),
169  OverrideDataType(gamma, dataType),
170  cLayer->GetParameters(),
171  reason);
172  break;
173  }
175  {
176  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
177  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
178  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
179 
180  result = layerSupportObject.IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
181  OverrideDataType(output, dataType),
182  cLayer->GetParameters(),
183  reason);
184  break;
185  }
187  {
188  auto cLayer = PolymorphicDowncast<const BroadcastToLayer*>(&layer);
189  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
190  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
191 
192  result = layerSupportObject.IsBroadcastToSupported(OverrideDataType(input, dataType),
193  OverrideDataType(output, dataType),
194  cLayer->GetParameters(),
195  reason);
196  break;
197  }
198  case LayerType::Cast:
199  {
200  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
201  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
202 
203  result = layerSupportObject.IsCastSupported(OverrideDataType(input, dataType),
204  OverrideDataType(output, dataType),
205  reason);
206  break;
207  }
209  {
210  auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
211 
212  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
213  const TensorInfo& output = layer.GetInputSlot(0).GetTensorInfo();
214 
215  const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
216 
217  result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
218  OverrideDataType(output, dataType),
219  descriptor,
220  reason);
221  break;
222  }
224  {
225  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
226 
227  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
228  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
229  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
230 
231  result = layerSupportObject.IsComparisonSupported(OverrideDataType(input0, dataType),
232  OverrideDataType(input1, dataType),
233  OverrideDataType(output, DataType::Boolean),
234  cLayer->GetParameters(),
235  reason);
236  break;
237  }
238  case LayerType::Constant:
239  {
240  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
241  result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
242  break;
243  }
245  {
246  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
247  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
248  result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
249  break;
250  }
252  {
253  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
254  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
255  result = layerSupportObject.IsConvertFp32ToFp16Supported(input, output, reason);
256  break;
257  }
259  {
260  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
261 
262  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
263  dataType);
264  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
265 
266  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(layer.GetInputSlot(1).GetConnection(),
267  "Convolution2dLayer: Weights should be connected as a Constant Layer.");
268  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
269  dataType);
270 
271  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
272 
273  // Construct optional biases object based on the value of m_BiasEnabled
274  Optional<TensorInfo> biases;
275  if (descriptor.m_BiasEnabled)
276  {
277  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(layer.GetInputSlot(2).GetConnection(),
278  "Convolution2dLayer:Bias should be connected as a Constant Layer.");
279  biases = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
280  GetBiasTypeFromWeightsType(dataType));
281  }
282 
283  result = layerSupportObject.IsConvolution2dSupported(
284  input,
285  output,
286  descriptor,
287  weights,
288  biases,
289  reason);
290  break;
291  }
293  {
294  auto cLayer = PolymorphicDowncast<const Convolution3dLayer*>(&layer);
295 
296  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
297  dataType);
298  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
299 
300  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(layer.GetInputSlot(1).GetConnection(),
301  "Convolution3dLayer: Weights should be connected as a Constant Layer.");
302  const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
303  dataType);
304 
305  const Convolution3dDescriptor& descriptor = cLayer->GetParameters();
306 
307  // Construct optional biases object based on the value of m_BiasEnabled
308  Optional<TensorInfo> biases;
309  if (descriptor.m_BiasEnabled)
310  {
311  biases = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
312  GetBiasTypeFromWeightsType(dataType));
313  }
314 
315  result = layerSupportObject.IsConvolution3dSupported(
316  input,
317  output,
318  descriptor,
319  weights,
320  biases,
321  reason);
322  break;
323  }
324  case LayerType::Debug:
325  {
326  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
327  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
328 
329  result = layerSupportObject.IsDebugSupported(OverrideDataType(input, dataType),
330  OverrideDataType(output, dataType),
331  reason);
332  break;
333  }
335  {
336  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
337 
338  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
339  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
340 
341  result = layerSupportObject.IsDepthToSpaceSupported(OverrideDataType(input, dataType),
342  OverrideDataType(output, dataType),
343  cLayer->GetParameters(),
344  reason);
345  break;
346  }
348  {
349  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
350  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
351  dataType);
352  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
353  const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
354  dataType);
355 
356  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
357 
358  // Construct optional biases object based on the value of m_BiasEnabled
359  Optional<TensorInfo> biases;
360  if (descriptor.m_BiasEnabled)
361  {
362  biases = OverrideDataType(cLayer->GetInputSlot(2).GetTensorInfo(),
363  GetBiasTypeFromWeightsType(dataType));
364  }
365 
366  result = layerSupportObject.IsDepthwiseConvolutionSupported(input,
367  output,
368  descriptor,
369  weights,
370  biases,
371  reason);
372  break;
373  }
375  {
376  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
377  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
378 
379  result = layerSupportObject.IsDequantizeSupported(input,
380  OverrideDataType(output, dataType),
381  reason);
382  break;
383  }
385  {
386  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
387  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetTensorInfo();
388  const TensorInfo& scores = layer.GetInputSlot(1).GetTensorInfo();
389  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
390 
391  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
392  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
393  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
394  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
395 
396  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
397  result = layerSupportObject.IsDetectionPostProcessSupported(boxEncodings,
398  scores,
399  anchors,
400  detectionBoxes,
401  detectionClasses,
402  detectionScores,
403  numDetections,
404  descriptor,
405  reason);
406  break;
407  }
409  {
410  auto cLayer = PolymorphicDowncast<const ElementwiseBinaryLayer*>(&layer);
411 
412  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
413  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
414  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
415  std::vector<TensorInfo> infos = { OverrideDataType(input0, dataType),
416  OverrideDataType(input1, dataType),
417  OverrideDataType(output, dataType) };
418  result = layerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
419  infos,
420  cLayer->GetParameters(),
421  EmptyOptional(),
422  EmptyOptional(),
423  reason);
424  break;
425  }
427  {
428  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
429 
430  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
431  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
432 
433  result = layerSupportObject.IsElementwiseUnarySupported(OverrideDataType(input, dataType),
434  OverrideDataType(output, dataType),
435  cLayer->GetParameters(),
436  reason);
437  break;
438  }
439  case LayerType::Fill:
440  {
441  auto cLayer = PolymorphicDowncast<const FillLayer*>(&layer);
442  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
443  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
444  const FillDescriptor& descriptor = cLayer->GetParameters();
445 
446  result = layerSupportObject.IsFillSupported(
447  OverrideDataType(input, dataType),
448  OverrideDataType(output, dataType),
449  descriptor,
450  reason);
451  break;
452  }
454  {
455  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
456  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
457  result = layerSupportObject.IsFakeQuantizationSupported(OverrideDataType(input, dataType),
458  cLayer->GetParameters(),
459  reason);
460  break;
461  }
462  case LayerType::Floor:
463  {
464  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
465  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
466  result = layerSupportObject.IsFloorSupported(OverrideDataType(input, dataType),
467  OverrideDataType(output, dataType),
468  reason);
469  break;
470  }
472  {
473  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
474  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
475  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
476 
477  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
478  TensorInfo weightsInfo;
479  const TensorInfo* weightsInfoPtr = nullptr;
480 
481  weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(), dataType);
482  weightsInfoPtr = &weightsInfo;
483 
484  TensorInfo biasInfo;
485  const TensorInfo* biasInfoPtr = nullptr;
486  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
487  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
488  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
489  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
490 
491  if (descriptor.m_BiasEnabled)
492  {
493  biasInfo = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(), dataType);
494  biasInfoPtr = &biasInfo;
495  }
496  else
497  {
498  // If biases are not enabled pass a dummy tensorinfo for the validation
499  switch(input.GetDataType())
500  {
501  case DataType::BFloat16:
502  {
503  biasInfoPtr = &dummyBFloat16Bias;
504  break;
505  }
506  case DataType::Float16:
507  {
508  biasInfoPtr = &dummyFloat16Bias;
509  break;
510  }
511  case DataType::Float32:
512  {
513  biasInfoPtr = &dummyFloat32Bias;
514  break;
515  }
516  case DataType::QAsymmU8:
517  case DataType::QAsymmS8:
518  case DataType::QSymmS8:
519  case DataType::QSymmS16:
520  {
521  biasInfoPtr = &dummyQA8Bias;
522  break;
523  }
524  default:
525  {
526  throw InvalidArgumentException("Unexpected bias type");
527  }
528  }
529  }
530  result = layerSupportObject.IsFullyConnectedSupported(
531  OverrideDataType(input, dataType),
532  OverrideDataType(output, dataType),
533  *weightsInfoPtr,
534  *biasInfoPtr,
535  descriptor,
536  reason);
537  break;
538  }
539  case LayerType::Fused:
540  {
541  auto cLayer = PolymorphicDowncast<const FusedLayer*>(&layer);
542 
543  // Get vector of all outputs.
544  auto getOutTensorInfo = [&dataType](const OutputSlot& slot)
545  {
546  return OverrideDataType(slot.GetTensorInfo(), dataType);
547  };
548  auto beginOutputs = MakeTransformIterator(layer.GetOutputSlots().begin(), getOutTensorInfo);
549  auto endOutputs = MakeTransformIterator(layer.GetOutputSlots().end(), getOutTensorInfo);
550  std::vector<TensorInfo> outputs(beginOutputs, endOutputs);
551  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
552 
553  // Get vector of all inputs.
554  auto getInputTensorInfo = [&dataType](const InputSlot& slot)
555  {
556  return OverrideDataType(slot.GetTensorInfo(), dataType);
557  };
558  auto beginInputs = MakeTransformIterator(layer.GetInputSlots().begin(), getInputTensorInfo);
559  auto endInputs = MakeTransformIterator(layer.GetInputSlots().end(), getInputTensorInfo);
560  std::vector<TensorInfo> inputs(beginInputs, endInputs);
561  const std::vector<std::reference_wrapper<TensorInfo>> inputPtrs(inputs.begin(), inputs.end());
562 
563  result = layerSupportObject.IsFusedSupported(inputPtrs,
564  outputPtrs,
565  cLayer->GetParameters(),
566  reason);
567  break;
568  }
569  case LayerType::Gather:
570  {
571  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
572  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
573  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
574  auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
575  const GatherDescriptor& descriptor = cLayer->GetParameters();
576  result = layerSupportObject.IsGatherSupported(OverrideDataType(input0, dataType),
577  input1,
578  OverrideDataType(output, dataType),
579  descriptor,
580  reason);
581  break;
582  }
583  case LayerType::GatherNd:
584  {
585  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
586  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
587  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
588  result = layerSupportObject.IsGatherNdSupported(OverrideDataType(input0, dataType),
589  input1,
590  OverrideDataType(output, dataType),
591  reason);
592  break;
593  }
594  case LayerType::Input:
595  {
596  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
597  result = layerSupportObject.IsInputSupported(OverrideDataType(input, dataType), reason);
598  break;
599  }
601  {
602  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
603  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
604 
605  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
606  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
607 
608  result = layerSupportObject.IsInstanceNormalizationSupported(
609  OverrideDataType(input, dataType),
610  OverrideDataType(output, dataType),
611  descriptor,
612  reason);
613  break;
614  }
616  {
617  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
618  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
619 
620  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
621  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
622 
623  result = layerSupportObject.IsL2NormalizationSupported(
624  OverrideDataType(input, dataType),
625  OverrideDataType(output, dataType),
626  descriptor,
627  reason);
628  break;
629  }
631  {
632  auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
633 
634  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
635  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
636  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
637 
638  result = layerSupportObject.IsLogicalBinarySupported(input0,
639  input1,
640  output,
641  cLayer->GetParameters(),
642  reason);
643  break;
644  }
646  {
647  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
648 
649  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
650  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
651 
652  result = layerSupportObject.IsLogSoftmaxSupported(OverrideDataType(input, dataType),
653  OverrideDataType(output, dataType),
654  cLayer->GetParameters(),
655  reason);
656  break;
657  }
658  case LayerType::Lstm:
659  {
660  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
661  const LstmDescriptor& descriptor = cLayer->GetParameters();
662 
663  // All inputs.
664  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
665  dataType);
666  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
667  dataType);
668  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
669  dataType);
670  // All outputs
671  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
672  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
673  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
674  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
675 
676  // Basic parameters
677  const TensorInfo& inputToForgetWeights
678  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
679  const TensorInfo& inputToCellWeights
680  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
681  const TensorInfo& inputToOutputWeights
682  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
683  const TensorInfo& recurrentToForgetWeights
684  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
685  const TensorInfo& recurrentToCellWeights
686  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
687  const TensorInfo& recurrentToOutputWeights
688  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
689  const TensorInfo& forgetGateBias
690  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
691  const TensorInfo& cellBias
692  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
693  const TensorInfo& outputGateBias
694  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
695 
696  LstmInputParamsInfo paramsInfo;
697 
698  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
699  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
700  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
701  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
702  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
703  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
704  paramsInfo.m_ForgetGateBias = &forgetGateBias;
705  paramsInfo.m_CellBias = &cellBias;
706  paramsInfo.m_OutputGateBias = &outputGateBias;
707 
708 
709  // Optional parameters
710  TensorInfo optInputToInputWeights;
711  TensorInfo optRecurrentToInputWeights;
712  TensorInfo optCellToInputWeights;
713  TensorInfo optInputGateBias;
714  TensorInfo optProjectionWeights;
715  TensorInfo optProjectionBias;
716  TensorInfo optCellToForgetWeights;
717  TensorInfo optCellToOutputWeights;
718  TensorInfo optInputLayerNormWeights;
719  TensorInfo optForgetLayerNormWeights;
720  TensorInfo optCellLayerNormWeights;
721  TensorInfo optOutputLayerNormWeights;
722 
723  if(!descriptor.m_CifgEnabled)
724  {
725  optInputToInputWeights =
726  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
727  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
728 
729  optRecurrentToInputWeights =
730  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
731  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
732  optInputGateBias =
733  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
734  paramsInfo.m_InputGateBias = &optInputGateBias;
735  }
736 
737  if(descriptor.m_ProjectionEnabled)
738  {
739  optProjectionWeights =
740  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
741  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
742  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
743  {
744  optProjectionBias =
745  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
746  paramsInfo.m_ProjectionBias = &optProjectionBias;
747  }
748  }
749 
750  if(descriptor.m_PeepholeEnabled)
751  {
752  if(!descriptor.m_CifgEnabled)
753  {
754  optCellToInputWeights =
755  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
756  dataType);
757  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
758  }
759  optCellToForgetWeights =
760  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
761  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
762  optCellToOutputWeights =
763  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
764  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
765  }
766 
767  if(descriptor.m_LayerNormEnabled)
768  {
769  if (!descriptor.m_CifgEnabled)
770  {
771  optInputLayerNormWeights = OverrideDataType(
772  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
773  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
774  }
775 
776  optForgetLayerNormWeights = OverrideDataType(
777  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
778  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
779 
780  optCellLayerNormWeights = OverrideDataType(
781  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
782  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
783 
784  optOutputLayerNormWeights = OverrideDataType(
785  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
786  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
787  }
788 
789  result = layerSupportObject.IsLstmSupported(
790  input,
791  outputStateIn,
792  cellStateIn,
793  scratchBuffer,
794  outputStateOut,
795  cellStateOut,
796  output,
797  descriptor,
798  paramsInfo,
799  reason);
800  break;
801  }
802  case LayerType::Maximum:
803  {
805  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
806  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
807  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
808 
809  result = layerSupportObject.IsMaximumSupported(OverrideDataType(input0, dataType),
810  OverrideDataType(input1, dataType),
811  OverrideDataType(output, dataType),
812  reason);
814  break;
815  }
816  case LayerType::MemCopy:
817  {
818  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
819  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
820 
821  result = layerSupportObject.IsMemCopySupported(OverrideDataType(input, dataType),
822  OverrideDataType(output, dataType),
823  reason);
824  break;
825  }
827  {
828  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
829  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
830 
831  result = layerSupportObject.IsMemImportSupported(OverrideDataType(input, dataType),
832  OverrideDataType(output, dataType),
833  reason);
834  break;
835  }
836  case LayerType::Merge:
837  {
838  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
839  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
840  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
841 
842  result = layerSupportObject.IsMergeSupported(OverrideDataType(input0, dataType),
843  OverrideDataType(input1, dataType),
844  OverrideDataType(output, dataType),
845  reason);
846  break;
847  }
848  case LayerType::Concat:
849  {
850  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
851 
852  // Get vector of all inputs.
853  auto getTensorInfo = [&dataType](const InputSlot& slot)
854  {
855  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
856  };
857 
858  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
859  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
860  std::vector<TensorInfo> inputs(beginI, endI);
861 
862  auto getTensorInfoPtr = [](const TensorInfo& info)
863  {
864  return &info;
865  };
866 
867  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
868  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
869  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
870 
871  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
872 
873  result = layerSupportObject.IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
874 
875 
876  break;
877  }
879  {
881  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
882  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
883  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
884  result = layerSupportObject.IsMultiplicationSupported(
885  OverrideDataType(input0, dataType),
886  OverrideDataType(input1, dataType),
887  OverrideDataType(output, dataType),
888  reason);
890  break;
891  }
893  {
894  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
895  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
896  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
897  result = layerSupportObject.IsNormalizationSupported(OverrideDataType(input, dataType),
898  OverrideDataType(output, dataType),
899  cLayer->GetParameters(),
900  reason);
901  break;
902  }
903  case LayerType::Output:
904  {
905  const TensorInfo& output = layer.GetInputSlot(0).GetTensorInfo();
906  result = layerSupportObject.IsOutputSupported(OverrideDataType(output, dataType), reason);
907  break;
908  }
909  case LayerType::Permute:
910  {
911  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
912  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
913  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
914  result = layerSupportObject.IsPermuteSupported(OverrideDataType(input, dataType),
915  OverrideDataType(output, dataType),
916  cLayer->GetParameters(),
917  reason);
918  break;
919  }
920  case LayerType::Pad:
921  {
922  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
923  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
924  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
925  result = layerSupportObject.IsPadSupported(
926  OverrideDataType(input, dataType),
927  OverrideDataType(output, dataType),
928  cLayer->GetParameters(),
929  reason);
930  break;
931  }
933  {
934  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
935  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
936  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
937  result = layerSupportObject.IsPooling2dSupported(OverrideDataType(input, dataType),
938  OverrideDataType(output, dataType),
939  cLayer->GetParameters(),
940  reason);
941  break;
942  }
944  {
945  auto cLayer = PolymorphicDowncast<const Pooling3dLayer*>(&layer);
946  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
947  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
948  result = layerSupportObject.IsPooling3dSupported(OverrideDataType(input, dataType),
949  OverrideDataType(output, dataType),
950  cLayer->GetParameters(),
951  reason);
952  break;
953  }
955  {
956  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
957  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
958  result = layerSupportObject.IsPreCompiledSupported(OverrideDataType(input, dataType),
959  cLayer->GetParameters(),
960  reason);
961  break;
962  }
963  case LayerType::Quantize:
964  {
965  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
966  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
967  result = layerSupportObject.IsQuantizeSupported(input, output, reason);
968  break;
969  }
970  case LayerType::QLstm:
971  {
972  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
973  const QLstmDescriptor& descriptor = cLayer->GetParameters();
974 
975  // Inputs
976  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
977  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetTensorInfo();
978  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetTensorInfo();
979 
980  // Outputs
981  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
982  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
983  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
984 
985  // Lstm parameters
986  LstmInputParamsInfo paramsInfo;
987 
988  // Basic parameters
989  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
990  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
991  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
992 
993  paramsInfo.m_RecurrentToForgetWeights =
994  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
995  paramsInfo.m_RecurrentToCellWeights =
996  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
997  paramsInfo.m_RecurrentToOutputWeights =
998  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
999 
1000  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
1001  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
1002  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
1003 
1004  if(!descriptor.m_CifgEnabled)
1005  {
1006  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
1007  paramsInfo.m_RecurrentToInputWeights =
1008  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
1009  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
1010  }
1011 
1012  if(descriptor.m_ProjectionEnabled)
1013  {
1014  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
1015 
1016  // Projection bias is optional even if projection is enabled
1017  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1018  {
1019  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
1020  }
1021  }
1022 
1023  if(descriptor.m_PeepholeEnabled)
1024  {
1025  if (!descriptor.m_CifgEnabled)
1026  {
1027  paramsInfo.m_CellToInputWeights =
1028  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
1029  }
1030 
1031  paramsInfo.m_CellToForgetWeights =
1032  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
1033  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
1034  }
1035 
1036  if(descriptor.m_LayerNormEnabled)
1037  {
1038  if (!descriptor.m_CifgEnabled)
1039  {
1040  paramsInfo.m_InputLayerNormWeights =
1041  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
1042  }
1043 
1044  paramsInfo.m_ForgetLayerNormWeights =
1045  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
1046  paramsInfo.m_CellLayerNormWeights =
1047  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
1048  paramsInfo.m_OutputLayerNormWeights =
1049  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
1050  }
1051 
1052  result = layerSupportObject.IsQLstmSupported(input,
1053  previousOutputIn,
1054  previousCellStateIn,
1055  outputStateOut,
1056  cellStateOut,
1057  output,
1058  descriptor,
1059  paramsInfo,
1060  reason);
1061  break;
1062  }
1064  {
1065  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
1066 
1067  // Inputs
1068  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1069  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetTensorInfo();
1070  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetTensorInfo();
1071 
1072  // Outputs
1073  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
1074  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
1075 
1076  // QuantizedLstm parameters
1077  QuantizedLstmInputParamsInfo paramsInfo;
1078 
1079  paramsInfo.m_InputToInputWeights =
1080  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
1081  paramsInfo.m_InputToForgetWeights =
1082  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
1083  paramsInfo.m_InputToCellWeights =
1084  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
1085  paramsInfo.m_InputToOutputWeights =
1086  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
1087 
1088  paramsInfo.m_RecurrentToInputWeights =
1089  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
1090  paramsInfo.m_RecurrentToForgetWeights =
1091  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
1092  paramsInfo.m_RecurrentToCellWeights =
1093  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
1094  paramsInfo.m_RecurrentToOutputWeights =
1095  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
1096 
1097  paramsInfo.m_InputGateBias =
1098  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
1099  paramsInfo.m_ForgetGateBias =
1100  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
1101  paramsInfo.m_CellBias =
1102  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
1103  paramsInfo.m_OutputGateBias =
1104  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
1105 
1106  result = layerSupportObject.IsQuantizedLstmSupported(input,
1107  previousCellStateIn,
1108  previousOutputIn,
1109  cellStateOut,
1110  output,
1111  paramsInfo,
1112  reason);
1113  break;
1114  }
1115  case LayerType::Division:
1116  {
1118  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1119  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1120  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1121  result = layerSupportObject.IsDivisionSupported(
1122  OverrideDataType(input0, dataType),
1123  OverrideDataType(input1, dataType),
1124  OverrideDataType(output, dataType),
1125  reason);
1127  break;
1128  }
1129  case LayerType::Rank:
1130  {
1131  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1132  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1133  result = layerSupportObject.IsRankSupported(OverrideDataType(input, dataType),
1134  OverrideDataType(output, dataType),
1135  reason);
1136  break;
1137  }
1138  case LayerType::Reshape:
1139  {
1140  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
1141  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1142  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1143  result = layerSupportObject.IsReshapeSupported(OverrideDataType(input, dataType),
1144  OverrideDataType(output, dataType),
1145  cLayer->GetParameters(),
1146  reason);
1147  break;
1148  }
1149  case LayerType::Resize:
1150  {
1151  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
1152  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1153  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1154  result = layerSupportObject.IsResizeSupported(OverrideDataType(input, dataType),
1155  OverrideDataType(output, dataType),
1156  cLayer->GetParameters(),
1157  reason);
1158  break;
1159  }
1160  case LayerType::ReverseV2:
1161  {
1162  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1163  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1164  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1165  result = layerSupportObject.IsReverseV2Supported(OverrideDataType(input0, dataType),
1166  OverrideDataType(input1, armnn::DataType::Signed32),
1167  OverrideDataType(output, dataType),
1168  reason);
1169  break;
1170  }
1171  case LayerType::ScatterNd:
1172  {
1173  auto cLayer = PolymorphicDowncast<const ScatterNdLayer*>(&layer);
1174  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1175  const TensorInfo& indices = layer.GetInputSlot(1).GetTensorInfo();
1176  const TensorInfo& updates = layer.GetInputSlot(2).GetTensorInfo();
1177  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1178 
1179  result = layerSupportObject.IsScatterNdSupported(OverrideDataType(input, dataType),
1180  OverrideDataType(indices, dataType),
1181  OverrideDataType(updates, dataType),
1182  OverrideDataType(output, dataType),
1183  cLayer->GetParameters(),
1184  reason);
1185 
1186  break;
1187  }
1188  case LayerType::Shape:
1189  {
1190  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1191  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1192 
1193  result = layerSupportObject.IsShapeSupported(OverrideDataType(input, dataType),
1194  OverrideDataType(output, dataType),
1195  reason);
1196  break;
1197  }
1198  case LayerType::Slice:
1199  {
1200  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
1201 
1202  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1203  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1204 
1205  result = layerSupportObject.IsSliceSupported(OverrideDataType(input, dataType),
1206  OverrideDataType(output, dataType),
1207  cLayer->GetParameters(),
1208  reason);
1209  break;
1210  }
1211  case LayerType::Softmax:
1212  {
1213  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
1214  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1215  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1216  result = layerSupportObject.IsSoftmaxSupported(OverrideDataType(input, dataType),
1217  OverrideDataType(output, dataType),
1218  cLayer->GetParameters(),
1219  reason);
1220  break;
1221  }
1223  {
1224  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
1225  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1226  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1227  result = layerSupportObject.IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
1228  OverrideDataType(output, dataType),
1229  cLayer->GetParameters(),
1230  reason);
1231  break;
1232  }
1234  {
1235  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
1236 
1237  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1238  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1239 
1240  result = layerSupportObject.IsSpaceToDepthSupported(OverrideDataType(input, dataType),
1241  OverrideDataType(output, dataType),
1242  cLayer->GetParameters(),
1243  reason);
1244  break;
1245  }
1246  case LayerType::Splitter:
1247  {
1248  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
1249  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1250 
1251  // Get vector of all outputs.
1252  auto getTensorInfo = [&dataType](const OutputSlot& slot)
1253  {
1254  return OverrideDataType(slot.GetTensorInfo(), dataType);
1255  };
1256  auto beginI = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfo);
1257  auto endI = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfo);
1258  std::vector<TensorInfo> outputs(beginI, endI);
1259 
1260  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
1261 
1262  result = layerSupportObject.IsSplitterSupported(OverrideDataType(input, dataType),
1263  outputPtrs,
1264  cLayer->GetParameters(),
1265  reason);
1266  break;
1267  }
1268  case LayerType::Stack:
1269  {
1270  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1271 
1272  // Get vector of all inputs.
1273  auto getTensorInfo = [&dataType](const InputSlot& slot)
1274  {
1275  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1276  };
1277  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfo);
1278  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfo);
1279  std::vector<TensorInfo> inputs(beginI, endI);
1280 
1281  auto getTensorInfoPtr = [](const TensorInfo& info)
1282  {
1283  return &info;
1284  };
1285  auto beginPtr = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1286  auto endPtr = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1287  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1288 
1289  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1290 
1291  result = layerSupportObject.IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1292 
1293  break;
1294  }
1295  case LayerType::StandIn:
1296  {
1297  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1298 
1299  // Get vector of all inputs.
1300  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1301  {
1302  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1303  };
1304  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1305  {
1306  return OverrideDataType(slot.GetTensorInfo(), dataType);
1307  };
1308  auto beginI = MakeTransformIterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1309  auto endI = MakeTransformIterator(layer.GetInputSlots().end(), getTensorInfoIn);
1310  std::vector<TensorInfo> inputs(beginI, endI);
1311 
1312  auto beginO = MakeTransformIterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1313  auto endO = MakeTransformIterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1314  std::vector<TensorInfo> outputs(beginO, endO);
1315 
1316 
1317  auto getTensorInfoPtr = [](const TensorInfo& info)
1318  {
1319  return &info;
1320  };
1321  auto beginPtrI = MakeTransformIterator(inputs.begin(), getTensorInfoPtr);
1322  auto endPtrI = MakeTransformIterator(inputs.end(), getTensorInfoPtr);
1323  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1324 
1325  auto beginPtrO = MakeTransformIterator(outputs.begin(), getTensorInfoPtr);
1326  auto endPtrO = MakeTransformIterator(outputs.end(), getTensorInfoPtr);
1327  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1328 
1329 
1330  result = layerSupportObject.IsStandInSupported(inputPtrs,
1331  outputPtrs,
1332  cLayer->GetParameters(),
1333  reason);
1334  break;
1335  }
1337  {
1338  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1339  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1340  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1341  result = layerSupportObject.IsStridedSliceSupported(OverrideDataType(input, dataType),
1342  OverrideDataType(output, dataType),
1343  cLayer->GetParameters(),
1344  reason);
1345  break;
1346  }
1348  {
1350  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1351  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1352  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1353  result = layerSupportObject.IsSubtractionSupported(
1354  OverrideDataType(input0, dataType),
1355  OverrideDataType(input1, dataType),
1356  OverrideDataType(output, dataType),
1357  reason);
1359  break;
1360  }
1361  case LayerType::Switch:
1362  {
1363  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1364  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1365  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1366  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1367  result = layerSupportObject.IsSwitchSupported(OverrideDataType(input0, dataType),
1368  OverrideDataType(input1, dataType),
1369  OverrideDataType(output0, dataType),
1370  OverrideDataType(output1, dataType),
1371  reason);
1372  break;
1373  }
1374  case LayerType::Mean:
1375  {
1376  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1377  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1378  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1379  result = layerSupportObject.IsMeanSupported(
1380  OverrideDataType(input, dataType),
1381  OverrideDataType(output, dataType),
1382  cLayer->GetParameters(),
1383  reason);
1384  break;
1385  }
1386  case LayerType::Minimum:
1387  {
1389  const TensorInfo& input0 = layer.GetInputSlot(0).GetTensorInfo();
1390  const TensorInfo& input1 = layer.GetInputSlot(1).GetTensorInfo();
1391  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1392  result = layerSupportObject.IsMinimumSupported(OverrideDataType(input0, dataType),
1393  OverrideDataType(input1, dataType),
1394  OverrideDataType(output, dataType),
1395  reason);
1397  break;
1398  }
1399  case LayerType::Prelu:
1400  {
1401  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1402  const TensorInfo& alpha = layer.GetInputSlot(1).GetTensorInfo();
1403  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1404  result = layerSupportObject.IsPreluSupported(OverrideDataType(input, dataType),
1405  OverrideDataType(alpha, dataType),
1406  OverrideDataType(output, dataType),
1407  reason);
1408  break;
1409  }
1410  case LayerType::Tile:
1411  {
1412  auto cLayer = PolymorphicDowncast<const TileLayer*>(&layer);
1413  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1414  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1415 
1416  result = layerSupportObject.IsTileSupported(OverrideDataType(input, dataType),
1417  OverrideDataType(output, dataType),
1418  cLayer->GetParameters(),
1419  reason);
1420 
1421  break;
1422  }
1423  case LayerType::Transpose:
1424  {
1425  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1426  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1427  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1428  result = layerSupportObject.IsTransposeSupported(OverrideDataType(input, dataType),
1429  OverrideDataType(output, dataType),
1430  cLayer->GetParameters(),
1431  reason);
1432  break;
1433  }
1435  {
1436  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1437 
1438  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
1439  dataType);
1440  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1441 
1442  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1443 
1444  Optional<TensorInfo> biases;
1445  if (descriptor.m_BiasEnabled)
1446  {
1448  cLayer->m_Bias.get() != nullptr,
1449  "TransposeConvolution2d: Bias was enabled in the descriptor but no value was supplied.");
1450  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1451  GetBiasTypeFromWeightsType(dataType));
1452  }
1453 
1454  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(cLayer->m_Weight.get() != nullptr,
1455  "TransposeConvolution2d: Weights cannot be null.");
1456  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1457 
1458  result = layerSupportObject.IsTransposeConvolution2dSupported(input,
1459  output,
1460  descriptor,
1461  weights,
1462  biases,
1463  reason);
1464 
1465  break;
1466  }
1467  case LayerType::Reduce:
1468  {
1469  auto cLayer = PolymorphicDowncast<const ReduceLayer*>(&layer);
1470  const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo();
1471  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1472 
1473  result = layerSupportObject.IsReduceSupported(OverrideDataType(input, dataType),
1474  OverrideDataType(output, dataType),
1475  cLayer->GetParameters(),
1476  reason);
1477  break;
1478  }
1480  {
1481  auto cLayer = PolymorphicDowncast<const UnidirectionalSequenceLstmLayer*>(&layer);
1482  const UnidirectionalSequenceLstmDescriptor& descriptor = cLayer->GetParameters();
1483 
1484  // All inputs.
1485  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
1486  dataType);
1487  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
1488  dataType);
1489  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
1490  dataType);
1491  // Outputs
1492  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1493  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
1494  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
1495 
1496  // Basic parameters
1497  const TensorInfo& inputToForgetWeights
1498  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
1499  const TensorInfo& inputToCellWeights
1500  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
1501  const TensorInfo& inputToOutputWeights
1502  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
1503  const TensorInfo& recurrentToForgetWeights
1504  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
1505  const TensorInfo& recurrentToCellWeights
1506  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
1507  const TensorInfo& recurrentToOutputWeights
1508  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
1509  const TensorInfo& forgetGateBias
1510  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
1511  const TensorInfo& cellBias
1512  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
1513  const TensorInfo& outputGateBias
1514  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
1515 
1516  LstmInputParamsInfo paramsInfo;
1517 
1518  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
1519  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
1520  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
1521  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1522  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
1523  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1524  paramsInfo.m_ForgetGateBias = &forgetGateBias;
1525  paramsInfo.m_CellBias = &cellBias;
1526  paramsInfo.m_OutputGateBias = &outputGateBias;
1527 
1528  // Optional parameters
1529  TensorInfo optInputToInputWeights;
1530  TensorInfo optRecurrentToInputWeights;
1531  TensorInfo optCellToInputWeights;
1532  TensorInfo optInputGateBias;
1533  TensorInfo optProjectionWeights;
1534  TensorInfo optProjectionBias;
1535  TensorInfo optCellToForgetWeights;
1536  TensorInfo optCellToOutputWeights;
1537  TensorInfo optInputLayerNormWeights;
1538  TensorInfo optForgetLayerNormWeights;
1539  TensorInfo optCellLayerNormWeights;
1540  TensorInfo optOutputLayerNormWeights;
1541 
1542  if(!descriptor.m_CifgEnabled)
1543  {
1544  optInputToInputWeights =
1545  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
1546  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
1547 
1548  optRecurrentToInputWeights =
1549  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
1550  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
1551  optInputGateBias =
1552  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
1553  paramsInfo.m_InputGateBias = &optInputGateBias;
1554  }
1555 
1556  if(descriptor.m_ProjectionEnabled)
1557  {
1558  optProjectionWeights =
1559  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
1560  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
1561  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
1562  {
1563  optProjectionBias =
1564  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
1565  paramsInfo.m_ProjectionBias = &optProjectionBias;
1566  }
1567  }
1568 
1569  if(descriptor.m_PeepholeEnabled)
1570  {
1571  if(!descriptor.m_CifgEnabled)
1572  {
1573  optCellToInputWeights =
1574  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
1575  dataType);
1576  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
1577  }
1578  optCellToForgetWeights =
1579  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
1580  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
1581  optCellToOutputWeights =
1582  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
1583  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
1584  }
1585 
1586  if(descriptor.m_LayerNormEnabled)
1587  {
1588  if (!descriptor.m_CifgEnabled)
1589  {
1590  optInputLayerNormWeights = OverrideDataType(
1591  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
1592  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
1593  }
1594 
1595  optForgetLayerNormWeights = OverrideDataType(
1596  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
1597  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
1598 
1599  optCellLayerNormWeights = OverrideDataType(
1600  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
1601  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
1602 
1603  optOutputLayerNormWeights = OverrideDataType(
1604  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
1605  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
1606  }
1607 
1608  result = layerSupportObject.IsUnidirectionalSequenceLstmSupported(input,
1609  outputStateIn,
1610  cellStateIn,
1611  outputStateOut,
1612  cellStateOut,
1613  output,
1614  descriptor,
1615  paramsInfo,
1616  reason);
1617  break;
1618  }
1619  default:
1620  {
1621  reason.value() = "Unrecognised layer type";
1622  result = false;
1623  break;
1624  }
1625  }
1626  return result;
1627 }
1628 
1630  const IConnectableLayer& connectableLayer,
1631  Optional<DataType> dataType,
1632  std::string& outReasonIfUnsupported)
1633 {
1634  return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
1635 }
1636 
1638  Optional<DataType> dataType,
1639  std::string& outReasonIfUnsupported)
1640 {
1641  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1642  return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1643 }
1644 
1646  Optional<DataType> dataType,
1647  std::string& outReasonIfUnsupported,
1648  const ModelOptions& modelOptions)
1649 {
1650  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1651  return IsLayerConfigurationSupported(layer->GetBackendId(),
1652  connectableLayer,
1653  dataType,
1654  outReasonIfUnsupported,
1655  modelOptions);
1656 }
1657 
1659  const IConnectableLayer& connectableLayer,
1660  Optional<DataType> dataType,
1661  std::string& outReasonIfUnsupported,
1662  const ModelOptions& modelOptions)
1663 {
1664  return IsLayerConfigurationSupported(backendId,
1665  connectableLayer,
1666  dataType,
1667  outReasonIfUnsupported,
1668  modelOptions);
1669 }
1670 
 1671 } // namespace armnn
BackendHelper.hpp
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::DataType::Boolean
@ Boolean
armnn::LayerType::Permute
@ Permute
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::Optional
Definition: Optional.hpp:270
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::LayerType::Floor
@ Floor
armnn::UnidirectionalSequenceLstmDescriptor
LstmDescriptor UnidirectionalSequenceLstmDescriptor
Definition: Descriptors.hpp:1169
armnn::LayerType::Transpose
@ Transpose
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::DataType::Float32
@ Float32
armnn::GetBiasTypeFromWeightsType
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
Definition: LayerSupportRules.hpp:13
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::LayerType::Tile
@ Tile
armnn::MakeTransformIterator
constexpr TransformIterator< Function, Iterator > MakeTransformIterator(Iterator i, Function f)
Definition: TransformIterator.hpp:90
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::LayerType::Stack
@ Stack
BackendRegistry.hpp
armnn::DataType::QSymmS8
@ QSymmS8
armnn::LayerType::Normalization
@ Normalization
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::Reduce
@ Reduce
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
ILayerSupport.hpp
TransformIterator.hpp
armnn::DataType::QSymmS16
@ QSymmS16
WorkloadFactory.hpp
armnn::LayerType::GatherNd
@ GatherNd
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::DataType::BFloat16
@ BFloat16
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::Slice
@ Slice
armnn::DataType::Float16
@ Float16
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Prelu
@ Prelu
armnn::LayerType::ScatterNd
@ ScatterNd
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::Concat
@ Concat
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Merge
@ Merge
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::LayerType::StandIn
@ StandIn
armnn::LayerType::Debug
@ Debug
IBackendInternal.hpp
armnn::LayerType::Softmax
@ Softmax
LayersFwd.hpp
armnn::BackendRegistryInstance
BackendRegistry & BackendRegistryInstance()
Definition: BackendRegistry.cpp:15
armnn::LayerType::Quantize
@ Quantize
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::Addition
@ Addition
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::LayerType::BroadcastTo
@ BroadcastTo
armnn::BoostLogSeverityMapping::info
@ info
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::LayerType::MemImport
@ MemImport
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::IWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: WorkloadFactory.cpp:1629
armnn::LayerType::Division
@ Division
armnn::DataType::Signed32
@ Signed32
armnn::LayerType::Shape
@ Shape
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::Gather
@ Gather
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::LayerType::Switch
@ Switch
armnn::LayerType::Reshape
@ Reshape
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Fill
@ Fill
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::LayerType::Fused
@ Fused
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::PreCompiled
@ PreCompiled
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::BackendId
Definition: BackendId.hpp:75
armnn::LayerType::ReverseV2
@ ReverseV2
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
Types.hpp
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::LayerType::Pad
@ Pad
Layer.hpp
armnn::LayerType::Rank
@ Rank
armnn::LayerType::Mean
@ Mean
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::LayerType::Input
@ Input
armnn::ModelOptions
std::vector< BackendOptions > ModelOptions
Definition: BackendOptions.hpp:18
armnn::LayerType::Resize
@ Resize
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::LayerType::Maximum
@ Maximum
armnn::LayerType::Activation
@ Activation
armnn::LayerType::Lstm
@ Lstm
armnn::LayerType::Dequantize
@ Dequantize
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::QLstm
@ QLstm
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant
ARMNN_THROW_INVALIDARG_MSG_IF_FALSE
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
Definition: Exceptions.hpp:210