ArmNN 24.02
NeonWorkloadFactory.cpp
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonBackendId.hpp"
#include "NeonBackendModelContext.hpp"
#include "NeonTensorHandle.hpp"
#include "NeonWorkloadFactory.hpp"

#include <Layer.hpp>

#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <armnn/backends/MemCopyWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <backendsCommon/MemImportWorkload.hpp>

#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>

namespace armnn
{

namespace
{
static const BackendId s_Id{NeonBackendId()};
}

bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer,
                                           Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool NeonWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                           Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported,
                                           const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& NeonWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

void NeonWorkloadFactory::SetNumberOfThreads()
{
    if (m_ModelContextPtr)
    {
        const unsigned int MIN_THREADS = 1;
        const unsigned int MAX_THREADS = 64;

        // Set the number of threads to be used if the user has set NumberOfThreads param
        // Only set if within limit or valid input
        auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
        auto numberOfThreads = modelOptions->GetNumberOfThreads();

        if (numberOfThreads != 0 && numberOfThreads >= MIN_THREADS && numberOfThreads <= MAX_THREADS)
        {
            arm_compute::Scheduler::get().set_num_threads(numberOfThreads);
        }
    }
}
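
// Usage note (illustrative sketch): SetNumberOfThreads() reads the NumberOfThreads value
// from the Neon backend's ModelOptions and, when it falls inside [MIN_THREADS, MAX_THREADS],
// forwards it to the Arm Compute Library scheduler. The sketch below shows how a caller
// would typically supply that option; it assumes the armnn::BackendOptions /
// OptimizerOptionsOpaque API and the "CpuAcc" backend id, so check the Neon backend
// documentation for the exact spelling.
//
//     armnn::OptimizerOptionsOpaque optimizerOptions;
//     optimizerOptions.AddModelOption(armnn::BackendOptions("CpuAcc",
//                                                           {{"NumberOfThreads", 4u}}));
//     // Optimize() builds a NeonBackendModelContext from these options, and the factory's
//     // SetNumberOfThreads() picks the value up via GetNumberOfThreads().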

NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    SetNumberOfThreads();
}

NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager,
                                         const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
{
    SetNumberOfThreads();
}

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                          TensorShape const& subTensorShape,
                                                                          unsigned int const* subTensorOrigin) const
{
    const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    arm_compute::Coordinates coords;
    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<NeonSubTensorHandle>(
        PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
}
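
// Usage note (illustrative sketch): ArmNN orders tensor dimensions from the slowest to the
// fastest iterating dimension, while arm_compute::Coordinates are indexed from the fastest
// iterating dimension, which is why the loop above reverses the sub-tensor origin. For a
// hypothetical 4D NCHW parent tensor, an ArmNN origin of { n, c, h, w } ends up as ACL
// coordinates (w, h, c, n):
//
//     const unsigned int origin[4] = { 0, 1, 2, 3 };   // ArmNN order: N, C, H, W
//     // after the loop: coords[0] = 3 (W), coords[1] = 2 (H),
//     //                 coords[2] = 1 (C), coords[3] = 0 (N)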

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    return tensorHandle;
}

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       DataLayout dataLayout,
                                                                       const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    return tensorHandle;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
                                                               const QueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchMatMul :
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return std::make_unique<NeonBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info, isFastMathEnabled);
        }
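        // Note (illustrative): isFastMathEnabled is read from the Neon backend's ModelOptions,
        // i.e. the same BackendOptions mechanism sketched after SetNumberOfThreads() above,
        // but with the option {"FastMathEnabled", true} (assumed spelling). When enabled, the
        // Arm Compute Library may select faster but lower-precision kernels such as Winograd
        // convolution. A minimal sketch, reusing the optimizerOptions object from that note:
        //
        //     optimizerOptions.AddModelOption(armnn::BackendOptions("CpuAcc",
        //                                                           {{"FastMathEnabled", true}}));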
        case LayerType::BatchNormalization :
        {
            auto batchNormalizationQueueDescriptor
                = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
                                                               info,
                                                               m_MemoryManager->GetIntraLayerManager(),
                                                               isFastMathEnabled);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
                                                               info,
                                                               m_MemoryManager->GetIntraLayerManager(),
                                                               isFastMathEnabled);
        }
        case LayerType::Debug :
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace :
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d :
        {
            auto depthwiseConvolution2dQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
        }
        case LayerType::Dequantize :
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess :
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division :
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
        }
        case LayerType::ElementwiseBinary :
        {
            auto elementwiseBinaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
            switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case BinaryOperation::Add:
                {
                    AdditionQueueDescriptor additionQueueDescriptor;
                    additionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    additionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonAdditionWorkload>(additionQueueDescriptor, info);
                }
                case BinaryOperation::Div:
                {
                    DivisionQueueDescriptor divisionQueueDescriptor;
                    divisionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    divisionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonDivisionWorkload>(divisionQueueDescriptor, info);
                }
                case BinaryOperation::Maximum:
                {
                    MaximumQueueDescriptor maximumQueueDescriptor;
                    maximumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    maximumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonMaximumWorkload>(maximumQueueDescriptor, info);
                }
                case BinaryOperation::Minimum:
                {
                    MinimumQueueDescriptor minimumQueueDescriptor;
                    minimumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    minimumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonMinimumWorkload>(minimumQueueDescriptor, info);
                }
                case BinaryOperation::Mul:
                {
                    MultiplicationQueueDescriptor multiplicationQueueDescriptor;
                    multiplicationQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonMultiplicationWorkload>(multiplicationQueueDescriptor, info);
                }
                case BinaryOperation::Power:
                case BinaryOperation::SqDiff:
                {
                    return std::make_unique<NeonElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
                }
                case BinaryOperation::Sub:
                {
                    SubtractionQueueDescriptor subtractionQueueDescriptor;
                    subtractionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    subtractionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonSubtractionWorkload>(subtractionQueueDescriptor, info);
                }
                default:
                    return nullptr;
            }
        }
        case LayerType::ElementwiseUnary :
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case UnaryOperation::Abs:
                {
                    AbsQueueDescriptor absQueueDescriptor;
                    absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
                }
                case UnaryOperation::Exp:
                    return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::LogicalNot:
                    return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Log:
                    return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Neg:
                    return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Rsqrt:
                {
                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
                    rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
                }
                case UnaryOperation::Sin:
                    return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Sqrt:
                    return std::make_unique<NeonSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info);
                default:
                    return nullptr;
            }
        }
        case LayerType::Fill :
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor :
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
        }
        case LayerType::FullyConnected :
        {
            auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
                                                                info,
                                                                m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::Fused :
        {
            auto fusedQueueDescriptor = PolymorphicDowncast<const FusedQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFusedWorkload>(*fusedQueueDescriptor, info);
        }
        case LayerType::Gather :
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::GatherNd :
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonGatherNdWorkload>(*gatherNdQueueDescriptor, info);
        }
        case LayerType::Input :
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization :
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization :
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
                (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::LogSoftmax :
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
                                                            info,
                                                            m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::LogicalBinary :
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case LogicalBinaryOperation::LogicalAnd:
                    return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
                case LogicalBinaryOperation::LogicalOr:
                    return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
                default:
                    return nullptr;
            }
        }
        case LayerType::Lstm :
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum :
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
        }
        case LayerType::Mean :
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy :
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
            }
            return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport :
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum :
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
        }
        case LayerType::Multiplication :
        {
            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
        }
        case LayerType::Normalization :
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
                (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::Output :
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad :
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute :
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d :
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d :
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled :
        {
            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
        }
        case LayerType::Prelu :
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm :
        {
            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
        }
        case LayerType::Quantize :
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::QuantizedLstm :
        {
            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
        }
        case LayerType::Rank :
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce :
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape :
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize :
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::ReverseV2 :
        {
            auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReverseV2Workload>(*reverseV2QueueDescriptor, info);
        }
        case LayerType::Slice :
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax :
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::SpaceToBatchNd :
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth :
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter :
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack :
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice :
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction :
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
        }
        case LayerType::Tile:
        {
            auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTileWorkload>(*tileQueueDescriptor, info);
        }
        case LayerType::Transpose :
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d :
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
                                                                        info,
                                                                        m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::UnidirectionalSequenceLstm :
        {
            auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            if ((info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
                (info.m_InputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
                (info.m_InputTensorInfos[2].GetDataType() == armnn::DataType::Float32) &&
                (info.m_OutputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
                (info.m_OutputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
                (info.m_OutputTensorInfos[2].GetDataType() == armnn::DataType::Float32))
            {
                return std::make_unique<NeonUnidirectionalSequenceLstmFloatWorkload>(*desc, info);
            }
            else
            {
                return std::make_unique<NeonUnidirectionalSequenceLstmWorkload>(*desc, info);
            }
        }
        default:
            return nullptr;
    }
}

} // namespace armnn
@ Constant