ArmNN
 25.11
Loading...
Searching...
No Matches
ITensorHandleFactory.hpp
Go to the documentation of this file.
1//
2// Copyright © 2017, 2019-2022 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
8#include "ITensorHandle.hpp"
10#include <armnn/Tensor.hpp>
11#include <armnn/Types.hpp>
13
14#include <memory>
15#include <string>
16#include <vector>
17
18namespace armnn
19{
21
22/// Capability class to calculate in the GetCapabilities function
23/// so that only the capability in the scope can be chosen to calculate
// NOTE(review): this extraction is missing the doc-listing lines that carried
// the enum declaration (`enum class CapabilityClass`) and its enumerator
// values (listing lines 24, 26-27 and 31 are absent), so the fragment below is
// incomplete -- confirm against the original ITensorHandleFactory.hpp before
// relying on it.
25{
28
29 // add new enum values here
30
32};
33
34/// Capability of the TensorHandleFactory
36{
37 Capability(CapabilityClass capabilityClass, bool value)
38 : m_CapabilityClass(capabilityClass)
39 , m_Value(value)
40 {}
41
43 bool m_Value;
44};
45
47{
48public:
49 using FactoryId = std::string;
50 static const FactoryId LegacyFactoryId; /// Use the workload factory to create the tensor handle
51 static const FactoryId DeferredFactoryId; /// Some TensorHandleFactory decisions are deferred to run-time
52
54
55 virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
56 TensorShape const& subTensorShape,
57 unsigned int const* subTensorOrigin) const = 0;
58
59 virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0;
60
61 virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
62 DataLayout dataLayout) const = 0;
63
64 /// Utility Functions for backends which require TensorHandles to have unmanaged memory.
65 /// These should be overloaded if required to facilitate direct import of input tensors
66 /// and direct export of output tensors.
67 virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
68 const bool IsMemoryManaged) const
69 {
70 IgnoreUnused(IsMemoryManaged);
71 return CreateTensorHandle(tensorInfo);
72 }
73
74 virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
75 DataLayout dataLayout,
76 const bool IsMemoryManaged) const
77 {
78 IgnoreUnused(IsMemoryManaged);
79 return CreateTensorHandle(tensorInfo, dataLayout);
80 }
81
82 virtual const FactoryId& GetId() const = 0;
83
84 virtual bool SupportsInPlaceComputation() const { return false; }
85
86 virtual bool SupportsSubTensors() const = 0;
87
88 virtual bool SupportsMapUnmap() const { return true; }
89
90 virtual MemorySourceFlags GetExportFlags() const { return 0; }
91 virtual MemorySourceFlags GetImportFlags() const { return 0; }
92
93 virtual std::vector<Capability> GetCapabilities(const IConnectableLayer* layer,
94 const IConnectableLayer* connectedLayer,
95 CapabilityClass capabilityClass)
96 {
97 IgnoreUnused(layer);
98 IgnoreUnused(connectedLayer);
99 IgnoreUnused(capabilityClass);
100 return std::vector<Capability>();
101 }
102};
103
/// Strategy for passing tensor data across the edge between two layers.
// NOTE: "///<" (not "///") is required for trailing Doxygen comments; with
// plain "///" each description was attached to the *following* enumerator,
// producing off-by-one documentation in the generated docs.
enum class EdgeStrategy
{
    Undefined,           ///< No strategy has been defined. Used internally to verify integrity of optimizations.
    DirectCompatibility, ///< Destination backend can work directly with tensors on source backend.
    ExportToTarget,      ///< Source backends tensor data can be exported to destination backend tensor without copy.
    CopyToTarget         ///< Copy contents from source backend tensor to destination backend tensor.
};
111
112} //namespace armnn
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition INetwork.hpp:81
virtual const FactoryId & GetId() const =0
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
virtual bool SupportsSubTensors() const =0
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged) const
Utility Functions for backends which require TensorHandles to have unmanaged memory.
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
virtual MemorySourceFlags GetExportFlags() const
static const FactoryId LegacyFactoryId
Use the workload factory to create the tensor handle.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, DataLayout dataLayout) const =0
virtual bool SupportsInPlaceComputation() const
virtual MemorySourceFlags GetImportFlags() const
static const FactoryId DeferredFactoryId
Some TensorHandleFactory decisions are deferred to run-time.
virtual ~ITensorHandleFactory()
virtual bool SupportsMapUnmap() const
Copyright (c) 2021 ARM Limited and Contributors.
CapabilityClass
Capability class to calculate in the GetCapabilities function so that only the capability in the scope can be chosen to calculate.
unsigned int MemorySourceFlags
DataLayout
Definition Types.hpp:63
@ ExportToTarget
Source backends tensor data can be exported to destination backend tensor without copy.
@ DirectCompatibility
Destination backend can work directly with tensors on source backend.
@ Undefined
No strategy has been defined. Used internally to verify integrity of optimizations.
@ CopyToTarget
Copy contents from source backend tensor to destination backend tensor.
void IgnoreUnused(Ts &&...)
CapabilityClass m_CapabilityClass
Capability(CapabilityClass capabilityClass, bool value)