ArmNN 25.11
NeonWorkloadFactory.cpp
Go to the documentation of this file.
//
// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonBackendId.hpp"
#include "NeonBackendModelContext.hpp"
#include "NeonTensorHandle.hpp"
#include "NeonWorkloadFactory.hpp"

#include <Layer.hpp>

#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>

namespace armnn
{

namespace
{
static const BackendId s_Id{NeonBackendId()};
}

bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer,
                                           Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool NeonWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                           Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported,
                                           const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& NeonWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

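// Applies the user-supplied NumberOfThreads backend option, if one was provided, to the Arm Compute
// Library scheduler.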
void NeonWorkloadFactory::SetNumberOfThreads()
{
    if (m_ModelContextPtr)
    {
        const unsigned int MIN_THREADS = 1;
        const unsigned int MAX_THREADS = 64;

        // Set the number of threads to be used if the user has set the NumberOfThreads parameter.
        // Only apply it if the value is valid and within the limits.
        auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
        auto numberOfThreads = modelOptions->GetNumberOfThreads();

        if (numberOfThreads != 0 && numberOfThreads >= MIN_THREADS && numberOfThreads <= MAX_THREADS)
        {
            arm_compute::Scheduler::get().set_num_threads(numberOfThreads);
        }
    }
}

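// Both constructors call SetNumberOfThreads(); only the overload that receives a model context can
// actually change the scheduler's thread count, since the option is read from that context.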
NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    SetNumberOfThreads();
}

NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager,
                                         const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
{
    SetNumberOfThreads();
}

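// Creates a handle that aliases a region of the parent tensor, or returns nullptr if the requested
// shape/origin does not describe a valid sub-tensor of the parent.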
std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                          TensorShape const& subTensorShape,
                                                                          unsigned int const* subTensorOrigin) const
{
    const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    arm_compute::Coordinates coords;
    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<NeonSubTensorHandle>(
        PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
}

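// When IsMemoryManaged is true, the handle is registered with the inter-layer memory group so that its
// backing memory is allocated and released by the NeonMemoryManager.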
std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    return tensorHandle;
}

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       DataLayout dataLayout,
                                                                       const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    return tensorHandle;
}

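// Dispatches on LayerType and returns the matching Neon workload, or nullptr when the layer type (or
// sub-operation) is not implemented by this backend.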
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
                                                               const QueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchMatMul :
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return std::make_unique<NeonBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info, isFastMathEnabled);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormalizationQueueDescriptor
                    = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                    = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                    = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                    = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
                                                               info,
                                                               m_MemoryManager->GetIntraLayerManager(),
                                                               isFastMathEnabled);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
                                                               info,
                                                               m_MemoryManager->GetIntraLayerManager(),
                                                               isFastMathEnabled);
        }
        case LayerType::Debug :
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace :
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d :
        {
            auto depthwiseConvolution2dQueueDescriptor
                    = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
        }
        case LayerType::Dequantize :
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess :
        {
            auto detectionPostProcessQueueDescriptor
                    = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division :
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
        }
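        // ElementwiseBinary is decomposed: operations with a dedicated Neon workload (Add, Div, FloorDiv,
        // Maximum, Minimum, Mul, Sub) are forwarded to it, while Power and SqDiff use the generic
        // NeonElementwiseBinaryWorkload.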
        case LayerType::ElementwiseBinary :
        {
            auto elementwiseBinaryQueueDescriptor
                    = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
            switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case BinaryOperation::Add:
                {
                    AdditionQueueDescriptor additionQueueDescriptor;
                    additionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    additionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonAdditionWorkload>(additionQueueDescriptor, info);
                }
                case BinaryOperation::Div:
                {
                    DivisionQueueDescriptor divisionQueueDescriptor;
                    divisionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    divisionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonDivisionWorkload>(divisionQueueDescriptor, info);
                }
                case BinaryOperation::FloorDiv:
                {
                    DivisionQueueDescriptor floorDivQueueDescriptor;
                    floorDivQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    floorDivQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonFloorDivWorkload>(floorDivQueueDescriptor, info);
                }
                case BinaryOperation::Maximum:
                {
                    MaximumQueueDescriptor maximumQueueDescriptor;
                    maximumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    maximumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonMaximumWorkload>(maximumQueueDescriptor, info);
                }
                case BinaryOperation::Minimum:
                {
                    MinimumQueueDescriptor minimumQueueDescriptor;
                    minimumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    minimumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonMinimumWorkload>(minimumQueueDescriptor, info);
                }
                case BinaryOperation::Mul:
                {
                    MultiplicationQueueDescriptor multiplicationQueueDescriptor;
                    multiplicationQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonMultiplicationWorkload>(multiplicationQueueDescriptor, info);
                }
                case BinaryOperation::Power:
                case BinaryOperation::SqDiff:
                {
                    return std::make_unique<NeonElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
                }
                case BinaryOperation::Sub:
                {
                    SubtractionQueueDescriptor subtractionQueueDescriptor;
                    subtractionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    subtractionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    return std::make_unique<NeonSubtractionWorkload>(subtractionQueueDescriptor, info);
                }
                default:
                    return nullptr;
            }
        }
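        // ElementwiseUnary is likewise dispatched per unary operation to the corresponding Neon workload.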
        case LayerType::ElementwiseUnary :
        {
            auto elementwiseUnaryQueueDescriptor
                    = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case UnaryOperation::Abs:
                {
                    AbsQueueDescriptor absQueueDescriptor;
                    absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
                }
                case UnaryOperation::Exp:
                    return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::LogicalNot:
                    return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Log:
                    return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Neg:
                    return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Rsqrt:
                {
                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
                    rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
                }
                case UnaryOperation::Sin:
                    return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Sqrt:
                    return std::make_unique<NeonSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info);
                default:
                    return nullptr;
            }
        }
        case LayerType::Fill :
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor :
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
        }
        case LayerType::FullyConnected :
        {
            auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
                                                                info,
                                                                m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::Fused :
        {
            auto fusedQueueDescriptor = PolymorphicDowncast<const FusedQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFusedWorkload>(*fusedQueueDescriptor, info);
        }
        case LayerType::Gather :
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::GatherNd :
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonGatherNdWorkload>(*gatherNdQueueDescriptor, info);
        }
        case LayerType::Input :
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization :
        {
            auto instanceNormalizationQueueDescriptor
                    = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization :
        {
            auto l2NormalizationQueueDescriptor
                    = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
                    (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::LogSoftmax :
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
                                                            info,
                                                            m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::LogicalBinary :
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case LogicalBinaryOperation::LogicalAnd:
                    return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
                case LogicalBinaryOperation::LogicalOr:
                    return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
                default:
                    return nullptr;
            }
        }
        case LayerType::Lstm :
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum :
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
        }
        case LayerType::Mean :
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy :
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
            }
            return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport :
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum :
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
        }
        case LayerType::Multiplication :
        {
            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
        }
        case LayerType::Normalization :
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
                    (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::Output :
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad :
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute :
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d :
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d :
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled :
        {
            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
        }
        case LayerType::Prelu :
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm :
        {
            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
        }
        case LayerType::Quantize :
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::QuantizedLstm :
        {
            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
        }
        case LayerType::Rank :
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce :
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape :
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize :
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::ReverseV2 :
        {
            auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReverseV2Workload>(*reverseV2QueueDescriptor, info);
        }
        case LayerType::Slice :
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax :
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::SpaceToBatchNd :
        {
            auto spaceToBatchNdQueueDescriptor
                    = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth :
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter :
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack :
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice :
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction :
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
        }
        case LayerType::Tile:
        {
            auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTileWorkload>(*tileQueueDescriptor, info);
        }
        case LayerType::Transpose :
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d :
        {
            auto transposeConvolution2dQueueDescriptor
                    = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
                                                                        info,
                                                                        m_MemoryManager->GetIntraLayerManager());
        }
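        // UnidirectionalSequenceLstm has a dedicated float workload, selected only when all three inputs
        // and all three outputs are Float32; otherwise the generic workload is used.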
        case LayerType::UnidirectionalSequenceLstm :
        {
            auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            if ((info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
                (info.m_InputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
                (info.m_InputTensorInfos[2].GetDataType() == armnn::DataType::Float32) &&
                (info.m_OutputTensorInfos[0].GetDataType() == armnn::DataType::Float32) &&
                (info.m_OutputTensorInfos[1].GetDataType() == armnn::DataType::Float32) &&
                (info.m_OutputTensorInfos[2].GetDataType() == armnn::DataType::Float32))
            {
                return std::make_unique<NeonUnidirectionalSequenceLstmFloatWorkload>(*desc, info);
            }
            else
            {
                return std::make_unique<NeonUnidirectionalSequenceLstmWorkload>(*desc, info);
            }
        }
        default:
            return nullptr;
    }
}

} // namespace armnn