ArmNN 25.11
RefWorkloadFactory.cpp
//
// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>

#include "RefBackendId.hpp"
#include "RefTensorHandle.hpp"

namespace armnn
{

namespace
{
static const BackendId s_Id{RefBackendId()};
}
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
           (descriptor, info);
}

template <DataType ArmnnType>
bool IsDataType(const WorkloadInfo& info)
{
    auto checkType = [](const TensorInfo& tensorInfo) {return tensorInfo.GetDataType() == ArmnnType;};
    auto it = std::find_if(std::begin(info.m_InputTensorInfos), std::end(info.m_InputTensorInfos), checkType);
    if (it != std::end(info.m_InputTensorInfos))
    {
        return true;
    }
    it = std::find_if(std::begin(info.m_OutputTensorInfos), std::end(info.m_OutputTensorInfos), checkType);
    if (it != std::end(info.m_OutputTensorInfos))
    {
        return true;
    }
    return false;
}

RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager)
{
}

RefWorkloadFactory::RefWorkloadFactory()
    : m_MemoryManager(new RefMemoryManager())
{
}

const BackendId& RefWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool RefWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported,
                                          const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool isMemoryManaged) const
{
    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo);
    }
}

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      DataLayout dataLayout,
                                                                      const bool isMemoryManaged) const
{
    // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
    // to unmanaged memory. This also ensures memory alignment.
    IgnoreUnused(isMemoryManaged, dataLayout);

    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
                                                              const QueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation:
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition:
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
            }
        }
        case LayerType::ArgMinMax:
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchMatMul:
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
        }
        case LayerType::BatchNormalization:
        {
            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd:
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::BroadcastTo:
        {
            auto broadcastToQueueDescriptor = PolymorphicDowncast<const BroadcastToQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBroadcastToWorkload>(*broadcastToQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle:
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison:
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant:
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32:
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16:
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d:
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
        }
        case LayerType::Convolution3d:
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
        }
        case LayerType::Debug:
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            if (IsBFloat16(info))
            {
                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsFloat16(info))
            {
                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS8(info))
            {
                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmU8(info))
            {
                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmS8(info))
            {
                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned32(info))
            {
                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned64(info))
            {
                return std::make_unique<RefDebugSigned64Workload>(*debugQueueDescriptor, info);
            }
            if (IsBoolean(info))
            {
                return std::make_unique<RefDebugBooleanWorkload>(*debugQueueDescriptor, info);
            }
            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace:
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto depthwiseConvolution2DQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
        }
        case LayerType::Dequantize:
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess:
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division:
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
            }
        }
        case LayerType::ElementwiseBinary:
        {
            auto elementwiseBinaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
        }
        case LayerType::ElementwiseUnary:
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
            {
                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
            }
            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
        }
        case LayerType::FakeQuantization:
        {
            auto fakeQuantizationQueueDescriptor
                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
        }
        case LayerType::Fill:
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor:
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
            {
                return nullptr;
            }
            else
            {
                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
            }
        }
        case LayerType::FullyConnected:
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
        }
        case LayerType::Gather:
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::GatherNd:
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
        }
        case LayerType::Input:
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty() )
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
                                               "data input and output differ in byte count.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization:
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization:
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
        }
        case LayerType::LogicalBinary:
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
        }
        case LayerType::LogSoftmax:
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
        }
        case LayerType::Lstm:
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum:
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
            }
        }
        case LayerType::Mean:
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy:
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport:
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum:
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
            }
        }
        case LayerType::Multiplication:
        {
            auto multiplicationQueueDescriptor
                = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
            }
        }
        case LayerType::Normalization:
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
        }
        case LayerType::Output:
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty() )
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
                                               "differ in byte count.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad:
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute:
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                   NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d:
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d:
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled:
        {
            return nullptr;
        }
        case LayerType::Prelu:
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm:
        {
            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
        }
        case LayerType::Quantize:
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::Rank:
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce:
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape:
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize:
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::ReverseV2:
        {
            auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
            return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
        }
        case LayerType::ScatterNd:
        {
            auto scatterQueueDescriptor = PolymorphicDowncast<const ScatterNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefScatterNdWorkload>(*scatterQueueDescriptor, info);
        }
        case LayerType::Shape:
        {
            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
        }
        case LayerType::Slice:
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax:
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
        }
        case LayerType::SpaceToBatchNd:
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth:
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter:
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack:
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice:
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction:
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
            }
        }
        case LayerType::Tile:
        {
            auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTileWorkload>(*tileQueueDescriptor, info);
        }
        case LayerType::Transpose:
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
                   RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
                   (*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto unidirectionalSequenceLstmQueueDescriptor
                = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
                                                                           info);
        }
        default:
            return nullptr;
    }
}

} // namespace armnn
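
The factory is normally driven by the ArmNN runtime, which creates one workload per layer of a loaded graph, but the flow through CreateWorkload can also be exercised directly. The snippet below is a hypothetical caller-side sketch, not part of RefWorkloadFactory.cpp: the include paths and the hand-rolled descriptor wiring are assumptions made purely for illustration.

// Hypothetical usage sketch: build a ReLU activation workload through the
// reference factory. RefWorkloadFactory.hpp is an internal backend header
// (under src/backends/reference in the ArmNN tree), so the includes below are
// assumptions about the build setup rather than a public API guarantee.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadInfo.hpp>
#include "RefWorkloadFactory.hpp"

std::unique_ptr<armnn::IWorkload> MakeReluWorkloadSketch(armnn::RefWorkloadFactory& factory,
                                                         armnn::ITensorHandle* input,
                                                         armnn::ITensorHandle* output,
                                                         const armnn::TensorInfo& tensorInfo)
{
    using namespace armnn;

    // The queue descriptor carries the layer parameters plus the tensor handles.
    ActivationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Function = ActivationFunction::ReLu;
    descriptor.m_Inputs.push_back(input);
    descriptor.m_Outputs.push_back(output);

    // WorkloadInfo mirrors the descriptor with the corresponding TensorInfos.
    WorkloadInfo info;
    info.m_InputTensorInfos.push_back(tensorInfo);
    info.m_OutputTensorInfos.push_back(tensorInfo);

    // Dispatches through the switch above; LayerType::Activation yields a
    // RefActivationWorkload.
    return factory.CreateWorkload(LayerType::Activation, descriptor, info);
}

The ITensorHandle arguments would typically come from factory.CreateTensorHandle(tensorInfo) and must stay alive (with their memory allocated) for as long as the returned workload may execute.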
IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition INetwork.hpp:81
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
Backends should implement their own CreateWorkload function with a switch statement.
static bool IsLayerSupported(const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
const BackendId & GetBackendId() const override
armnn
Copyright (c) 2021 ARM Limited and Contributors.
bool IsQSymmS16(const WorkloadInfo &info)
RefTransposeWorkload< DataType::QAsymmU8 > RefTransposeQAsymm8Workload
bool IsSigned32(const WorkloadInfo &info)
bool IsBFloat16(const WorkloadInfo &info)
bool IsDataType(const WorkloadInfo &info) (see the sketch after this index)
RefPermuteWorkload< DataType::QAsymmU8 > RefPermuteQAsymm8Workload
RefPermuteWorkload< DataType::Float32 > RefPermuteFloat32Workload
constexpr bool IsQuantizedType()
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
LayerType::UnidirectionalSequenceLstm
Definition Types.hpp:496
std::vector< BackendOptions > ModelOptions
bool IsBoolean(const WorkloadInfo &info)
bool IsQAsymmS8(const WorkloadInfo &info)
constexpr const char * RefBackendId()
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for built-in pointers only.
bool IsQAsymmU8(const WorkloadInfo &info)
bool IsFloat16(const WorkloadInfo &info)
RefPermuteWorkload< DataType::Float16 > RefPermuteFloat16Workload
DataLayout
Definition Types.hpp:63
RefTransposeWorkload< DataType::Float32 > RefTransposeFloat32Workload
bool IsQSymmS8(const WorkloadInfo &info)
RefTransposeWorkload< DataType::Float16 > RefTransposeFloat16Workload
bool IsSigned64(const WorkloadInfo &info)
void IgnoreUnused(Ts &&...)
std::vector< ITensorHandle * > m_Inputs
WorkloadInfo
Contains information about TensorInfos of a layer.
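
The IsDataType<ArmnnType> template in the listing backs the per-type helpers (IsFloat16, IsQAsymmU8, IsSigned32, and so on) that the Debug, Permute and Transpose cases call; their definitions fall in the part of the file not shown above. A plausible sketch of how such a helper is expressed in terms of the template, consistent with the signatures in this index, is:

// Sketch only: presumed shape of the per-type helpers elided from the listing,
// each one forwarding to the IsDataType<ArmnnType> template with a fixed DataType.
bool IsFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::Float16>(info);
}

bool IsQAsymmU8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmU8>(info);
}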