ArmNN
 24.08
RefWorkloadFactory.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
#include "RefMemoryManager.hpp"

#include <armnn/Optional.hpp>
#include <armnn/backends/WorkloadFactory.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
13 
14 namespace armnn
15 {
16 
17 template <typename QueueDescriptorType>
18 constexpr bool IsOperationQueueDescriptor(const QueueDescriptorType&) { return true; }
19 template <>
20 constexpr bool IsOperationQueueDescriptor(const MemCopyQueueDescriptor&) { return false; }
21 template <>
22 constexpr bool IsOperationQueueDescriptor(const ConstantQueueDescriptor&) { return false; }
23 template <>
24 constexpr bool IsOperationQueueDescriptor(const PermuteQueueDescriptor&) { return false; }
25 
26 // Reference workload factory.
28 {
29 public:
30  explicit RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager);
32 
34 
35  const BackendId& GetBackendId() const override;
36 
37  static bool IsLayerSupported(const Layer& layer,
38  Optional<DataType> dataType,
39  std::string& outReasonIfUnsupported);
40 
41  static bool IsLayerSupported(const IConnectableLayer& layer,
42  Optional<DataType> dataType,
43  std::string& outReasonIfUnsupported,
44  const ModelOptions& modelOptions);
45 
46  bool SupportsSubTensors() const override { return false; }
47 
48  ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateSubTensorHandle instead")
50  TensorShape const& subTensorShape,
51  unsigned int const* subTensorOrigin) const override
52  {
53  IgnoreUnused(parent, subTensorShape, subTensorOrigin);
54  return nullptr;
55  }
56 
57  ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateTensorHandle instead")
58  std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
59  const bool IsMemoryManaged = true) const override;
60 
62  std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
63  DataLayout dataLayout,
64  const bool IsMemoryManaged = true) const override;
65 
66  std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
67  const QueueDescriptor& descriptor,
68  const WorkloadInfo& info) const override;
69 
70 private:
71  template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
72  std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info) const;
73 
74  mutable std::shared_ptr<RefMemoryManager> m_MemoryManager;
75 };
76 
77 } // namespace armnn
armnn::Optional
Definition: Optional.hpp:270
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
armnn::IsOperationQueueDescriptor
constexpr bool IsOperationQueueDescriptor(const QueueDescriptorType &)
Definition: RefWorkloadFactory.hpp:18
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::IWorkload
Workload interface to enqueue a layer computation.
Definition: IWorkload.hpp:23
armnn::RefWorkloadFactory::GetBackendId
const BackendId & GetBackendId() const override
Definition: RefWorkloadFactory.cpp:95
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::RefWorkloadFactory::SupportsSubTensors
bool SupportsSubTensors() const override
Definition: RefWorkloadFactory.hpp:46
IgnoreUnused.hpp
WorkloadFactory.hpp
armnn::Layer
Definition: Layer.hpp:230
armnn::ConstantQueueDescriptor
Definition: WorkloadData.hpp:368
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::RefWorkloadFactory
Definition: RefWorkloadFactory.hpp:27
Optional.hpp
armnn::RefWorkloadFactory::~RefWorkloadFactory
~RefWorkloadFactory()
Definition: RefWorkloadFactory.hpp:33
armnn::RefMemoryManager
Definition: RefMemoryManager.hpp:16
RefMemoryManager.hpp
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::RefWorkloadFactory::RefWorkloadFactory
RefWorkloadFactory()
Definition: RefWorkloadFactory.cpp:90
armnn::QueueDescriptor
Definition: WorkloadData.hpp:24
armnn::ITensorHandleFactory
Definition: ITensorHandleFactory.hpp:46
armnn::MemCopyQueueDescriptor
Definition: WorkloadData.hpp:86
armnn::RefWorkloadFactory::CreateWorkload
std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
Backends should implement their own CreateWorkload function with a switch statement.
Definition: RefWorkloadFactory.cpp:146
armnn::RefWorkloadFactory::CreateTensorHandle
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
Definition: RefWorkloadFactory.cpp:115
ARMNN_DEPRECATED_MSG
#define ARMNN_DEPRECATED_MSG(message)
Definition: Deprecated.hpp:43
std
Definition: BackendId.hpp:149
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::BackendId
Definition: BackendId.hpp:75
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::RefWorkloadFactory::CreateSubTensorHandle
std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
Definition: RefWorkloadFactory.hpp:49
armnn::ModelOptions
std::vector< BackendOptions > ModelOptions
Definition: BackendOptions.hpp:18
armnn::RefWorkloadFactory::IsLayerSupported
static bool IsLayerSupported(const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
Definition: RefWorkloadFactory.cpp:100
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::PermuteQueueDescriptor
Definition: WorkloadData.hpp:191