36 std::string& outReasonIfUnsupported)
43 std::string& outReasonIfUnsupported,
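// SetNumberOfThreads() below only forwards the configured thread count to the Arm Compute Library
// scheduler when it is non-zero and lies within [MIN_THREADS, MAX_THREADS]; out-of-range values
// leave the scheduler's default untouched. (The lines that read the value from the backend model
// context are elided in this excerpt.)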
54 void NeonWorkloadFactory::SetNumberOfThreads()
56 if (m_ModelContextPtr)
58 const unsigned int MIN_THREADS = 1;
59 const unsigned int MAX_THREADS = 64;
66 if (numberOfThreads != 0 && numberOfThreads >= MIN_THREADS && numberOfThreads <= MAX_THREADS)
68 arm_compute::Scheduler::get().set_num_threads(numberOfThreads);
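// Two constructors follow: the first default-constructs the backend-specific model context, the
// second stores the context supplied by the backend. A hedged usage sketch (variable names are
// illustrative only):
//     auto memoryManager = std::make_shared<NeonMemoryManager>();
//     NeonWorkloadFactory factory(memoryManager);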
74 : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
81 : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
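// CreateSubTensorHandle() builds an ACL TensorShape for the requested sub-tensor, copies the origin
// coordinates in reversed (ACL) dimension order, and validates the result against the parent shape;
// the early-return branch for an invalid sub-tensor is elided here. On success the parent handle is
// wrapped in a NeonSubTensorHandle.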
88 unsigned int const* subTensorOrigin) const
90 const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
98 coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
101 const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
102 if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
107 return std::make_unique<NeonSubTensorHandle>(
108 PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
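// Both CreateTensorHandle() overloads wrap the TensorInfo in a NeonTensorHandle (the second also
// applies the given DataLayout) and register the handle with the factory's inter-layer memory group
// when IsMemoryManaged is true; the guard and return lines are elided in this excerpt.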
112 const bool IsMemoryManaged) const
114 auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
117 tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
124 const bool IsMemoryManaged) const
126 auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
129 tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
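// CreateWorkload() is one large switch over LayerType: each case downcasts the generic
// QueueDescriptor to the layer-specific descriptor with PolymorphicDowncast and constructs the
// matching Neon workload. A caller would typically invoke it as, e.g. (hypothetical variable names):
//     factory.CreateWorkload(LayerType::Activation, activationDescriptor, workloadInfo);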
142 auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
143 return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
147 auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
148 return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
152 auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
153 return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
157 auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
158 bool isFastMathEnabled = false;
159 if (m_ModelContextPtr)
161 if (m_ModelContextPtr.get() != nullptr)
170 return std::make_unique<NeonBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info, isFastMathEnabled);
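// For BatchMatMul (and Convolution2d/3d below) isFastMathEnabled starts as false and is only
// switched on when a backend model context is present; the lookup that queries the
// NeonBackendModelContext (IsFastMathEnabled()) sits in the elided lines.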
174 auto batchNormalizationQueueDescriptor
175 = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
176 return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
180 auto batchToSpaceNdQueueDescriptor
181 = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
182 return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
186 auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
187 return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
191 auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
192 return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
196 auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
197 return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
201 auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
202 return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
206 auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
207 return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
211 auto convertFp16ToFp32QueueDescriptor
212 = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
213 return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
217 auto convertFp32ToFp16QueueDescriptor
218 = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
219 return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
223 auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
224 bool isFastMathEnabled = false;
225 if (m_ModelContextPtr)
227 if (m_ModelContextPtr.get() != nullptr)
236 return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
238 m_MemoryManager->GetIntraLayerManager(),
243 auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
244 bool isFastMathEnabled = false;
245 if (m_ModelContextPtr)
247 if (m_ModelContextPtr.get() != nullptr)
256 return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
258 m_MemoryManager->GetIntraLayerManager(),
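// The convolution workloads (and later FullyConnected, L2Normalization, LogSoftmax, Normalization,
// Softmax and TransposeConvolution2d) are additionally handed m_MemoryManager->GetIntraLayerManager(),
// presumably so ACL can manage each workload's internal/temporary tensors.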
263 auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
264 return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
268 auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
269 return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
273 auto depthwiseConvolution2dQueueDescriptor
274 = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
275 return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
279 auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
280 return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
284 auto detectionPostProcessQueueDescriptor
285 = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
286 return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
290 auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
291 return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
295 auto elementwiseBinaryQueueDescriptor
296 = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
297 switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
304 return std::make_unique<NeonAdditionWorkload>(additionQueueDescriptor, info);
311 return std::make_unique<NeonDivisionWorkload>(divisionQueueDescriptor, info);
318 return std::make_unique<NeonFloorDivWorkload>(floorDivQueueDescriptor, info);
325 return std::make_unique<NeonMaximumWorkload>(maximumQueueDescriptor, info);
332 return std::make_unique<NeonMinimumWorkload>(minimumQueueDescriptor, info);
339 return std::make_unique<NeonMultiplicationWorkload>(multiplicationQueueDescriptor, info);
344 return std::make_unique<NeonElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
351 return std::make_unique<NeonSubtractionWorkload>(subtractionQueueDescriptor, info);
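// ElementwiseBinary is decomposed per operation: Add, Div, FloorDiv, Maximum, Minimum, Mul and Sub
// are routed to dedicated Neon workloads (each case repackages m_Inputs/m_Outputs into an
// operation-specific descriptor in the elided lines), while the remaining operations fall through
// to the generic NeonElementwiseBinaryWorkload.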
359 auto elementwiseUnaryQueueDescriptor
360 = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
361 switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
366 absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
367 absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
368 return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
371 return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
373 return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
375 return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
377 return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
381 rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
382 rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
383 return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
386 return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
388 return std::make_unique<NeonSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info);
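// ElementwiseUnary follows the same pattern: one workload per operation (Abs, Exp, LogicalNot, Log,
// Neg, Rsqrt, Sin, Sqrt). Abs and Rsqrt copy the inputs/outputs into dedicated descriptors because
// their workloads take an AbsQueueDescriptor / RsqrtQueueDescriptor rather than the generic
// ElementwiseUnaryQueueDescriptor.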
395 auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
396 return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
400 auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
401 return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
405 auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
406 return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
408 m_MemoryManager->GetIntraLayerManager());
412 auto fusedQueueDescriptor = PolymorphicDowncast<const FusedQueueDescriptor*>(&descriptor);
413 return std::make_unique<NeonFusedWorkload>(*fusedQueueDescriptor, info);
417 auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
418 return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
422 auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
423 return std::make_unique<NeonGatherNdWorkload>(*gatherNdQueueDescriptor, info);
427 auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
428 return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
432 auto instanceNormalizationQueueDescriptor
433 = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
434 return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
438 auto l2NormalizationQueueDescriptor
439 = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
440 return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
441 (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
445 auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
446 return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
448 m_MemoryManager->GetIntraLayerManager());
452 auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
453 switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
456 return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
458 return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
465 auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
466 return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
470 auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
471 return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
475 auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
476 return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
480 auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
481 if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
485 return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
489 auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
490 if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
494 return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
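// MemCopy and MemImport both check for an empty or null first input handle before creating the
// copy/import workload; the error handling for the invalid case is in the elided lines.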
498 auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
499 return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
503 auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
504 return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
508 auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
509 return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
510 (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
514 auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
515 return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
519 auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
520 return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
524 auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
525 return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
529 auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
530 return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
534 auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
535 return std::make_unique<NeonPooling3dWorkload>(*pooling3dQueueDescriptor, info);
539 auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
540 return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
544 auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
545 return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
549 auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
550 return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
554 auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
555 return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
559 auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
560 return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
564 auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
565 return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
569 auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
570 return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
574 auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
575 return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
579 auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
580 return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
584 auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
585 return std::make_unique<NeonReverseV2Workload>(*reverseV2QueueDescriptor, info);
589 auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
590 return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
594 auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
595 return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
597 m_MemoryManager->GetIntraLayerManager());
601 auto spaceToBatchNdQueueDescriptor
602 = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
603 return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
607 auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
608 return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
612 auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
613 return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
617 auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
618 return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
622 auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
623 return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
627 auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
628 return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
632 auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
633 return std::make_unique<NeonTileWorkload>(*tileQueueDescriptor, info);
637 auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
638 return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
642 auto transposeConvolution2dQueueDescriptor
643 = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
644 return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
646 m_MemoryManager->GetIntraLayerManager());
650 auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
658 return std::make_unique<NeonUnidirectionalSequenceLstmFloatWorkload>(*desc, info);
662 return std::make_unique<NeonUnidirectionalSequenceLstmWorkload>(*desc, info);
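// UnidirectionalSequenceLstm returns either the float workload or the generic one; the data-type
// checks that choose between the two returns are elided above.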