ArmNN 24.08
RefWorkloadFactory Class Reference

#include <RefWorkloadFactory.hpp>

Inheritance diagram for RefWorkloadFactory
Collaboration diagram for RefWorkloadFactory

Public Member Functions

 RefWorkloadFactory (const std::shared_ptr< RefMemoryManager > &memoryManager)
 
 RefWorkloadFactory ()
 
 ~RefWorkloadFactory ()
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 Backends should implement their own CreateWorkload function with a switch statement. More...
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 
virtual void AfterWorkloadsCreated ()
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 27 of file RefWorkloadFactory.hpp.
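
For orientation, here is a minimal, hedged usage sketch of the class described on this page; the function name, tensor shape and header set are illustrative assumptions rather than anything stated here.

// Hypothetical sketch: construct the reference workload factory and request a tensor handle.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <RefWorkloadFactory.hpp>
#include <memory>

void SketchRefWorkloadFactoryBasics()
{
    // Default construction gives the factory its own RefMemoryManager.
    armnn::RefWorkloadFactory factory;

    // Describe a small Float32 tensor and let the factory back it with a RefTensorHandle.
    armnn::TensorInfo tensorInfo(armnn::TensorShape({1, 2, 2, 3}), armnn::DataType::Float32);
    std::unique_ptr<armnn::ITensorHandle> handle = factory.CreateTensorHandle(tensorInfo);

    // The reference backend does not support sub-tensors, so guard any CreateSubTensorHandle call.
    if (!factory.SupportsSubTensors())
    {
        // CreateSubTensorHandle would return nullptr here (see its documentation below).
    }
}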

Constructor & Destructor Documentation

◆ RefWorkloadFactory() [1/2]

RefWorkloadFactory ( const std::shared_ptr< RefMemoryManager > &  memoryManager)
explicit

Definition at line 85 of file RefWorkloadFactory.cpp.

86  : m_MemoryManager(memoryManager)
87 {
88 }

◆ RefWorkloadFactory() [2/2]

RefWorkloadFactory ( )

Definition at line 90 of file RefWorkloadFactory.cpp.

91  : m_MemoryManager(new RefMemoryManager())
92 {
93 }
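
A brief sketch of the two construction paths above; the RefMemoryManager header name is an assumption, since this page only shows the type being used.

// Hypothetical sketch: share one RefMemoryManager between the factory and other consumers.
#include <RefMemoryManager.hpp>   // assumed header for armnn::RefMemoryManager
#include <RefWorkloadFactory.hpp>
#include <memory>

void SketchFactoryConstruction()
{
    // Explicit memory manager: memory-managed tensor handles will draw from this shared pool.
    auto memoryManager = std::make_shared<armnn::RefMemoryManager>();
    armnn::RefWorkloadFactory managedFactory(memoryManager);

    // Default construction: the factory creates its own RefMemoryManager internally.
    armnn::RefWorkloadFactory defaultFactory;
}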

◆ ~RefWorkloadFactory()

~RefWorkloadFactory ( )
inline

Definition at line 33 of file RefWorkloadFactory.hpp.

33 {}

Member Function Documentation

◆ CreateSubTensorHandle()

std::unique_ptr<ITensorHandle> CreateSubTensorHandle ( ITensorHandle &  parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
inlineoverridevirtual

Implements IWorkloadFactory.

Definition at line 49 of file RefWorkloadFactory.hpp.

52  {
53  IgnoreUnused(parent, subTensorShape, subTensorOrigin);
54  return nullptr;
55  }

References armnn::IgnoreUnused().

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 115 of file RefWorkloadFactory.cpp.

117 {
118  if (isMemoryManaged)
119  {
120  return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
121  }
122  else
123  {
124  return std::make_unique<RefTensorHandle>(tensorInfo);
125  }
126 }

◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 128 of file RefWorkloadFactory.cpp.

131 {
132  // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
133  // to unmanaged memory. This also ensures memory alignment.
134  IgnoreUnused(isMemoryManaged, dataLayout);
135 
136  if (isMemoryManaged)
137  {
138  return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
139  }
140  else
141  {
142  return std::make_unique<RefTensorHandle>(tensorInfo);
143  }
144 }

References armnn::IgnoreUnused().
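
Following the two overloads above, a short sketch of the memory-managed and unmanaged paths; the shape, data type and layout are illustrative only.

// Hypothetical sketch of the IsMemoryManaged flag and the DataLayout overload shown above.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <RefWorkloadFactory.hpp>

void SketchTensorHandleCreation(const armnn::RefWorkloadFactory& factory)
{
    armnn::TensorInfo tensorInfo(armnn::TensorShape({1, 16}), armnn::DataType::Float32);

    // Default: the RefTensorHandle is backed by the factory's RefMemoryManager.
    auto managed = factory.CreateTensorHandle(tensorInfo);

    // Passing false yields a RefTensorHandle that is not tied to the memory manager.
    auto unmanaged = factory.CreateTensorHandle(tensorInfo, /*IsMemoryManaged=*/false);

    // The DataLayout overload ignores the layout argument for the reference backend,
    // as the implementation above shows.
    auto withLayout = factory.CreateTensorHandle(tensorInfo, armnn::DataLayout::NHWC, true);
}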

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info 
) const
overridevirtual

Backends should implement their own CreateWorkload function with a switch statement.

Each case should be a LayerType value; based on that type, the backend calls its specific workload-creation functionality. A caller-side sketch follows the listing below.

Implements IWorkloadFactory.

Definition at line 146 of file RefWorkloadFactory.cpp.

149 {
150  switch(type)
151  {
152  case LayerType::Activation :
153  {
154  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
155  return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
156  }
157  case LayerType::Addition :
158  {
159  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
160  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
161  {
162  return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
163  }
164  else
165  {
166  return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
167  }
168  }
169  case LayerType::ArgMinMax :
170  {
171  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
172  return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
173  }
174  case LayerType::BatchMatMul:
175  {
176  auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
177  return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
178  }
179  case LayerType::BatchNormalization:
180  {
181  auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
182  return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
183  }
184  case LayerType::BatchToSpaceNd:
185  {
186  auto batchToSpaceNdQueueDescriptor
187  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
188  return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
189  }
190  case LayerType::BroadcastTo:
191  {
192  auto broadcastToQueueDescriptor = PolymorphicDowncast<const BroadcastToQueueDescriptor*>(&descriptor);
193  return std::make_unique<RefBroadcastToWorkload>(*broadcastToQueueDescriptor, info);
194  }
195  case LayerType::Cast :
196  {
197  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
198  return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
199  }
200  case LayerType::ChannelShuffle:
201  {
202  auto channelShuffleQueueDescriptor
203  = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
204  return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
205  }
206  case LayerType::Comparison :
207  {
208  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
209  return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
210  }
211  case LayerType::Concat :
212  {
213  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
214  return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
215  }
216  case LayerType::Constant :
217  {
218  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
219  return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
220  }
221  case LayerType::ConvertFp16ToFp32:
222  {
223  auto convertFp16ToFp32QueueDescriptor
224  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
225  return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
226  }
227  case LayerType::ConvertFp32ToFp16:
228  {
229  auto convertFp32ToFp16QueueDescriptor
230  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
231  return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
232  }
233  case LayerType::Convolution2d:
234  {
235  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
236  return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
237  }
238  case LayerType::Convolution3d:
239  {
240  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
241  return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
242  }
243  case LayerType::Debug:
244  {
245  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
246  if (IsBFloat16(info))
247  {
248  return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
249  }
250  if (IsFloat16(info))
251  {
252  return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
253  }
254  if (IsQSymmS16(info))
255  {
256  return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
257  }
258  if (IsQSymmS8(info))
259  {
260  return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
261  }
262  if (IsQAsymmU8(info))
263  {
264  return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
265  }
266  if (IsQAsymmS8(info))
267  {
268  return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
269  }
270  if (IsSigned32(info))
271  {
272  return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
273  }
274  if (IsSigned64(info))
275  {
276  return std::make_unique<RefDebugSigned64Workload>(*debugQueueDescriptor, info);
277  }
278  if (IsBoolean(info))
279  {
280  return std::make_unique<RefDebugBooleanWorkload>(*debugQueueDescriptor, info);
281  }
282  return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
283  }
284  case LayerType::DepthToSpace:
285  {
286  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
287  return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
288  }
289  case LayerType::DepthwiseConvolution2d:
290  {
291  auto depthwiseConvolution2DQueueDescriptor
292  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
293  return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
294  }
295  case LayerType::Dequantize:
296  {
297  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
298  return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
299  }
300  case LayerType::DetectionPostProcess:
301  {
302  auto detectionPostProcessQueueDescriptor
303  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
304  return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
305  }
306  case LayerType::Division:
307  {
308  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
309  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
310  {
311  return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
312  }
313  else
314  {
315  return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
316  }
317  }
318  case LayerType::ElementwiseBinary:
319  {
320  auto elementwiseBinaryQueueDescriptor
321  = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
322  return std::make_unique<RefElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
323  }
324  case LayerType::ElementwiseUnary:
325  {
326  auto elementwiseUnaryQueueDescriptor
327  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
328  if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
329  {
330  return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
331  }
332  return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
333  }
334  case LayerType::FakeQuantization:
335  {
336  auto fakeQuantizationQueueDescriptor
337  = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
338  return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
339  }
340  case LayerType::Fill:
341  {
342  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
343  return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
344  }
345  case LayerType::Floor:
346  {
347  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
348  if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
349  {
350  return nullptr;
351  }
352  else
353  {
354  return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
355  }
356  }
357  case LayerType::FullyConnected:
358  {
359  auto fullyConnectedQueueDescriptor
360  = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
361  return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
362  }
363  case LayerType::Gather:
364  {
365  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
366  return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
367  }
368  case LayerType::GatherNd:
369  {
370  auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
371  return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
372  }
373  case LayerType::Input:
374  {
375  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
376  if (info.m_InputTensorInfos.empty() )
377  {
378  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
379  }
380  if (info.m_OutputTensorInfos.empty())
381  {
382  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
383  }
384  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
385  {
386  throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
387  "data input and output differ in byte count.");
388  }
389  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
390  }
391  case LayerType::InstanceNormalization:
392  {
393  auto instanceNormalizationQueueDescriptor
394  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
395  return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
396  }
397  case LayerType::L2Normalization:
398  {
399  auto l2NormalizationQueueDescriptor
400  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
401  return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
402  }
403  case LayerType::LogicalBinary:
404  {
405  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
406  return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
407  }
408  case LayerType::LogSoftmax:
409  {
410  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
411  return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
412  }
413  case LayerType::Lstm:
414  {
415  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
416  return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
417  }
418  case LayerType::Maximum:
419  {
420  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
421  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
422  {
423  return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
424  }
425  else
426  {
427  return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
428  }
429  }
430  case LayerType::Mean:
431  {
432  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
433  return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
434  }
435  case LayerType::MemCopy:
436  {
437  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
438  if (descriptor.m_Inputs.empty())
439  {
440  throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
441  }
442  return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
443  }
444  case LayerType::MemImport:
445  {
446  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
447  if (descriptor.m_Inputs.empty())
448  {
449  throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
450  }
451  return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
452  }
453  case LayerType::Minimum:
454  {
455  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
456  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
457  {
458  return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
459  }
460  else
461  {
462  return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
463  }
464  }
465  case LayerType::Multiplication:
466  {
467  auto multiplicationQueueDescriptor
468  = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
469  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
470  {
471  return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
472  }
473  else
474  {
475  return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
476  }
477  }
478  case LayerType::Normalization:
479  {
480  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
481  return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
482  }
483  case LayerType::Output:
484  {
485  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
486  if (info.m_InputTensorInfos.empty() )
487  {
488  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
489  }
490  if (info.m_OutputTensorInfos.empty())
491  {
492  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
493  }
494  if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
495  {
496  throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
497  "differ in byte count.");
498  }
499  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
500  }
501  case LayerType::Pad:
502  {
503  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
504  return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
505  }
506  case LayerType::Permute:
507  {
508  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
509  if (IsQSymmS16(info))
510  {
511  return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
512  }
513  else if (IsBFloat16(info))
514  {
515  return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
516  }
517  else if (IsQAsymmS8(info))
518  {
519  return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
520  }
521  return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
522  NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
523  }
524  case LayerType::Pooling2d:
525  {
526  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
527  return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
528  }
529  case LayerType::Pooling3d:
530  {
531  auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
532  return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
533  }
534  case LayerType::PreCompiled:
535  {
536  return nullptr;
537  }
538  case LayerType::Prelu:
539  {
540  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
541  return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
542  }
543  case LayerType::QLstm:
544  {
545  auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
546  return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
547  }
548  case LayerType::Quantize:
549  {
550  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
551  return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
552  }
553  case LayerType::Rank:
554  {
555  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
556  return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
557  }
558  case LayerType::Reduce:
559  {
560  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
561  return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
562  }
563  case LayerType::Reshape:
564  {
565  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
566  return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
567  }
568  case LayerType::Resize:
569  {
570  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
571  return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
572  }
573  case LayerType::ReverseV2:
574  {
575  auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
576  return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
577  }
578  case LayerType::ScatterNd:
579  {
580  auto scatterQueueDescriptor = PolymorphicDowncast<const ScatterNdQueueDescriptor*>(&descriptor);
581  return std::make_unique<RefScatterNdWorkload>(*scatterQueueDescriptor, info);
582  }
583  case LayerType::Shape:
584  {
585  auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
586  return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
587  }
588  case LayerType::Slice:
589  {
590  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
591  return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
592  }
593  case LayerType::Softmax:
594  {
595  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
596  return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
597  }
598  case LayerType::SpaceToBatchNd:
599  {
600  auto spaceToBatchNdQueueDescriptor
601  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
602  return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
603  }
604  case LayerType::SpaceToDepth:
605  {
606  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
607  return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
608  }
609  case LayerType::Splitter:
610  {
611  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
612  return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
613  }
614  case LayerType::Stack:
615  {
616  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
617  return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
618  }
619  case LayerType::StridedSlice:
620  {
621  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
622  return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
623  }
624  case LayerType::Subtraction:
625  {
626  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
627  if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
628  {
629  return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
630  }
631  else
632  {
633  return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
634  }
635  }
636  case LayerType::Tile:
637  {
638  auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
639  return std::make_unique<RefTileWorkload>(*tileQueueDescriptor, info);
640  }
641  case LayerType::Transpose:
642  {
643  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
644  if (IsQSymmS16(info))
645  {
646  return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
647  }
648  else if (IsBFloat16(info))
649  {
650  return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
651  }
652  else if (IsQAsymmS8(info))
653  {
654  return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
655  }
656  return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
657  RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
658  (*transposeQueueDescriptor, info);
659  }
660  case LayerType::TransposeConvolution2d:
661  {
662  auto transposeConvolution2dQueueDescriptor
663  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
664  return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
665  }
666  case LayerType::UnidirectionalSequenceLstm:
667  {
668  auto unidirectionalSequenceLstmQueueDescriptor
669  = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
670  return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
671  info);
672  }
673  default:
674  return nullptr;
675  }
676 }

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::BroadcastTo, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::info, armnn::Input, armnn::InstanceNormalization, armnn::IsBFloat16(), armnn::IsBoolean(), armnn::IsFloat16(), armnn::IsQAsymmS8(), armnn::IsQAsymmU8(), armnn::IsQSymmS16(), armnn::IsQSymmS8(), armnn::IsQuantizedType(), armnn::IsSigned32(), armnn::IsSigned64(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogicalNot, armnn::LogSoftmax, armnn::Lstm, QueueDescriptor::m_Inputs, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::PreCompiled, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::ReverseV2, armnn::ScatterNd, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Tile, armnn::Transpose, armnn::TransposeConvolution2d, and armnn::UnidirectionalSequenceLstm.
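
As a caller-side illustration of the switch above, here is a hedged sketch for the LayerType::Activation case; the descriptor setup, ReLu choice, shapes and header paths are assumptions for illustration and are not taken from this page.

// Hypothetical sketch: request an Activation workload from the reference factory.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/backends/WorkloadData.hpp>   // assumed location of ActivationQueueDescriptor / WorkloadInfo
#include <RefWorkloadFactory.hpp>
#include <memory>

void SketchCreateActivationWorkload()
{
    armnn::RefWorkloadFactory factory;

    armnn::TensorInfo tensorInfo(armnn::TensorShape({1, 8}), armnn::DataType::Float32);

    // WorkloadInfo carries the input/output TensorInfos that the switch above inspects.
    armnn::WorkloadInfo workloadInfo;
    workloadInfo.m_InputTensorInfos  = { tensorInfo };
    workloadInfo.m_OutputTensorInfos = { tensorInfo };

    // The queue descriptor holds the layer parameters plus the tensor handles.
    armnn::ActivationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;

    auto input  = factory.CreateTensorHandle(tensorInfo);
    auto output = factory.CreateTensorHandle(tensorInfo);
    descriptor.m_Inputs  = { input.get() };
    descriptor.m_Outputs = { output.get() };

    // Dispatches to the LayerType::Activation case above and returns a RefActivationWorkload.
    std::unique_ptr<armnn::IWorkload> workload =
        factory.CreateWorkload(armnn::LayerType::Activation, descriptor, workloadInfo);

    // Unsupported layer types fall through to the default case and yield a null pointer,
    // so callers should check the result before executing it.
}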

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
overridevirtual

Implements IWorkloadFactory.

Definition at line 95 of file RefWorkloadFactory.cpp.

96 {
97  return s_Id;
98 }

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported,
const ModelOptions &  modelOptions 
)
static

Definition at line 107 of file RefWorkloadFactory.cpp.

111 {
112  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
113 }

References IWorkloadFactory::IsLayerSupported().

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 100 of file RefWorkloadFactory.cpp.

103 {
104  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
105 }

References IWorkloadFactory::IsLayerSupported().
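
A sketch of checking support ahead of time through the IConnectableLayer overload documented above; the network-building calls come from the public INetwork API, and the Softmax choice is an assumption for illustration.

// Hypothetical sketch: ask the reference backend whether it supports a layer before compiling.
#include <armnn/BackendOptions.hpp>   // ModelOptions
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Optional.hpp>
#include <RefWorkloadFactory.hpp>
#include <string>

bool SketchIsSoftmaxSupported()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::SoftmaxDescriptor softmaxDesc;
    armnn::IConnectableLayer* softmax = network->AddSoftmaxLayer(softmaxDesc, "softmax");

    std::string reason;
    armnn::ModelOptions modelOptions;   // no backend-specific options

    // Forwards to IWorkloadFactory::IsLayerSupported with the reference backend id (s_Id).
    bool supported = armnn::RefWorkloadFactory::IsLayerSupported(
        *softmax,
        armnn::Optional<armnn::DataType>(armnn::DataType::Float32),
        reason,
        modelOptions);

    // When 'supported' is false, 'reason' explains why the layer was rejected.
    return supported;
}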

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inlineoverridevirtual

Implements IWorkloadFactory.

Definition at line 46 of file RefWorkloadFactory.hpp.

46 { return false; }

The documentation for this class was generated from the following files:

RefWorkloadFactory.hpp
RefWorkloadFactory.cpp