ArmNN
 25.11
Loading...
Searching...
No Matches
RefWorkloadFactory.hpp
Go to the documentation of this file.
1//
2// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5#pragma once
6
8
9#include <armnn/Optional.hpp>
12
13
14namespace armnn
15{
16
17template <typename QueueDescriptorType>
18constexpr bool IsOperationQueueDescriptor(const QueueDescriptorType&) { return true; }
19template <>
20constexpr bool IsOperationQueueDescriptor(const MemCopyQueueDescriptor&) { return false; }
21template <>
22constexpr bool IsOperationQueueDescriptor(const ConstantQueueDescriptor&) { return false; }
23template <>
24constexpr bool IsOperationQueueDescriptor(const PermuteQueueDescriptor&) { return false; }
25
26// Reference workload factory.
// NOTE(review): this listing is an extraction of a Doxygen page and is missing
// several internal lines (27, 31, 33-34, 49) — including the class-declaration
// line itself, so the base class cannot be confirmed from what is visible here.
// The trailing cross-reference text suggests the usual IWorkloadFactory-style
// interface (overrides of GetBackendId/CreateWorkload/etc.) — TODO confirm
// against the original header.
28{
29public:
 // Constructs the factory around a shared reference-backend memory manager
 // (stored in m_MemoryManager below).
30 explicit RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager);
32
34
 // Identifies which backend this factory serves.
35 const BackendId& GetBackendId() const override;
36
 // Static layer-support query; on failure a reason is written to
 // outReasonIfUnsupported.
37 static bool IsLayerSupported(const Layer& layer,
38 Optional<DataType> dataType,
39 std::string& outReasonIfUnsupported);
40
 // Overload taking the public IConnectableLayer plus per-model options.
41 static bool IsLayerSupported(const IConnectableLayer& layer,
42 Optional<DataType> dataType,
43 std::string& outReasonIfUnsupported,
44 const ModelOptions& modelOptions);
45
 // The reference backend does not support sub-tensors; consistent with
 // CreateSubTensorHandle returning nullptr below.
46 bool SupportsSubTensors() const override { return false; }
47
48 ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateSubTensorHandle instead")
 // NOTE(review): the signature line for CreateSubTensorHandle (internal line
 // 49) is missing from this extraction; only its trailing parameters remain.
 // Deliberately a no-op: sub-tensors are unsupported here (see above).
50 TensorShape const& subTensorShape,
51 unsigned int const* subTensorOrigin) const override
52 {
53 IgnoreUnused(parent, subTensorShape, subTensorOrigin);
54 return nullptr;
55 }
56
 // Deprecated tensor-handle creation entry points; callers should migrate to
 // ITensorHandleFactory::CreateTensorHandle per the deprecation messages.
57 ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateTensorHandle instead")
58 std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
59 const bool IsMemoryManaged = true) const override;
60
61 ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateTensorHandle instead")
62 std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
63 DataLayout dataLayout,
64 const bool IsMemoryManaged = true) const override;
65
 // Main factory entry point: builds the workload matching the given layer
 // type/descriptor (per the cross-reference text, implemented as a switch
 // over LayerType — TODO confirm in the .cpp).
66 std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
67 const QueueDescriptor& descriptor,
68 const WorkloadInfo& info) const override;
69
70private:
 // Helper selecting between the F32 and U8 workload implementations for a
 // given descriptor; defined out of line.
71 template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
72 std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info) const;
73
 // mutable so const factory methods can hand out / use the memory manager.
74 mutable std::shared_ptr<RefMemoryManager> m_MemoryManager;
75};
76
77} // namespace armnn
#define ARMNN_DEPRECATED_MSG(message)
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition INetwork.hpp:81
Workload interface to enqueue a layer computation.
Definition IWorkload.hpp:14
RefWorkloadFactory(const std::shared_ptr< RefMemoryManager > &memoryManager)
std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
Backends should implement their own CreateWorkload function with a switch statement.
static bool IsLayerSupported(const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
const BackendId & GetBackendId() const override
bool SupportsSubTensors() const override
Copyright (c) 2021 ARM Limited and Contributors.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
std::vector< BackendOptions > ModelOptions
DataLayout
Definition Types.hpp:63
constexpr bool IsOperationQueueDescriptor(const QueueDescriptorType &)
void IgnoreUnused(Ts &&...)
Contains information about TensorInfos of a layer.