ArmNN
 24.08
WorkingMemHandle.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
#include "ExecutionData.hpp"
#include "Layer.hpp"
#include "Network.hpp"
#include "WorkingMemDescriptor.hpp"

#include <armnn/IWorkingMemHandle.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/backends/IBackendInternal.hpp>

#include <backendsCommon/MemoryManager.hpp>

#include <unordered_map>
#include <mutex>
20 
21 namespace armnn
22 {
23 
24 namespace experimental
25 {
26 
27 using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
28 
29 class WorkingMemHandle final : public IWorkingMemHandle
30 {
31 
32 public:
34  {
36 
37  std::vector<std::pair<unsigned int, unsigned int>> m_InputSlotCoords;
38  };
39 
41  {
42  std::vector<LayerBindingId> m_LayerBindingIds;
43 
44  std::pair<unsigned int, unsigned int> m_OutputSlotCoords;
45  std::vector<std::pair<unsigned int, unsigned int>> m_InputSlotCoords;
46  };
47 
48  WorkingMemHandle(NetworkId networkId) : m_NetworkId(networkId){}
49 
50  WorkingMemHandle(NetworkId networkId,
51  std::vector<InputMemDescriptorCoords> inputLayerInfo,
52  std::vector<OutputMemDescriptorCoords> outputLayerInfo,
53  std::vector<WorkingMemDescriptor> workingMemDescriptors,
54  std::unique_ptr<MemoryManager> memoryManager,
55  std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> tensorMemory,
56  std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles,
57  std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles,
58  std::vector<std::pair<BackendId, ExecutionData>> executionDataVec,
59  BackendPtrMap* backends);
60 
62  { Free(); }
63 
65  {
66  return m_NetworkId;
67  }
68 
69  /// Allocate the backing memory required for execution. If this is not called, then allocation will be
70  /// deferred to execution time.
71  void Allocate() override;
72 
73  /// Free the backing memory required for execution.
74  void Free() override;
75 
76  /// IsAllocated returns true if the backing memory is currently allocated.
77  bool IsAllocated() override
78  {
79  return m_IsAllocated;
80  }
81 
82  /// Get the WorkingMemDescriptor at an index. The WorkingMemDescriptors are stored in the same order as
83  /// the Workloads in a topologically sorted graph.
85  {
86  return m_WorkingMemDescriptors[id];
87  }
88 
89  /// Get the ExecutionData at an index.
90  /// The ExecutionData is paired with a BackendId to be able to call backend specific functions upon it.
91  /// The ExecutionData are stored in the same order as the Workloads in a topologically sorted graph.
92  std::pair<BackendId, ExecutionData>& GetExecutionDataAt(unsigned int id) override
93  {
94  return m_ExecutionDataVec[id];
95  }
96 
98  {
99  return m_InputHandleMap.at(layerBindingId);
100  };
101 
103  {
104  return m_OutputHandleMap.at(layerBindingId);
105  };
106 
107  const std::vector<std::vector<ITensorHandle*>::iterator>& GetInputConnections(LayerBindingId layerBindingId) const
108  {
109  return m_InputConnectionMap.at(layerBindingId);
110  };
111 
112  const std::vector<std::vector<ITensorHandle*>::iterator>& GetOutputConnection(LayerBindingId layerBindingId) const
113  {
114  return m_OutputConnectionMap.at(layerBindingId);
115  };
116 
117  void MemSyncOutputs();
118 
119  std::vector<LayerBindingId>& GetBindingIdVector()
120  {
121  return m_BindingIdVec;
122  };
123 
124  void ValidateBindingIds();
125 
126 private:
127  using DifferenceType = std::vector<ITensorHandle*>::difference_type;
128  NetworkId m_NetworkId;
129 
130  std::unordered_map<LayerBindingId, ITensorHandle*> m_InputHandleMap;
131  std::unordered_map<LayerBindingId, ITensorHandle*> m_OutputHandleMap;
132  std::unordered_map<LayerBindingId, std::vector<std::vector<ITensorHandle*>::iterator>> m_InputConnectionMap;
133  std::unordered_map<LayerBindingId, std::vector<std::vector<ITensorHandle*>::iterator>> m_OutputConnectionMap;
134 
135  std::vector<WorkingMemDescriptor> m_WorkingMemDescriptors;
136 
137  std::unique_ptr<MemoryManager> m_MemoryManager;
138 
139  // Memory to be imported into the tensorHandles after allocation
140  std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> m_TensorMemory;
141 
142  // Tensors that will need to be allocated internally within armnn
143  std::vector<std::unique_ptr<ITensorHandle>> m_ManagedTensorHandles;
144 
145  // Tensors that will be allocated externally by the user
146  std::vector<std::unique_ptr<ITensorHandle>> m_UnmanagedTensorHandles;
147 
148  std::unordered_map<LayerBindingId, bool> m_InputValidationMap;
149  std::unordered_map<LayerBindingId, bool> m_OutputValidationMap;
150 
151  std::vector<LayerBindingId> m_BindingIdVec;
152 
153  DifferenceType m_InputSize;
154 
155  bool m_IsAllocated;
156 
157  std::vector<std::pair<BackendId, ExecutionData>> m_ExecutionDataVec;
158 
159  BackendPtrMap* m_Backends;
160 };
161 
162 } // end experimental namespace
163 
164 } // end armnn namespace
armnn::experimental::WorkingMemHandle::~WorkingMemHandle
~WorkingMemHandle()
Definition: WorkingMemHandle.hpp:61
armnn::experimental::WorkingMemHandle::Free
void Free() override
Free the backing memory required for execution.
Definition: WorkingMemHandle.cpp:125
armnn::experimental::WorkingMemHandle::GetExecutionDataAt
std::pair< BackendId, ExecutionData > & GetExecutionDataAt(unsigned int id) override
Get the ExecutionData at an index.
Definition: WorkingMemHandle.hpp:92
ExecutionData.hpp
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::experimental::WorkingMemHandle::GetOutputConnection
const std::vector< std::vector< ITensorHandle * >::iterator > & GetOutputConnection(LayerBindingId layerBindingId) const
Definition: WorkingMemHandle.hpp:112
armnn::experimental::WorkingMemHandle::IsAllocated
bool IsAllocated() override
IsAllocated returns true if the backing memory is currently allocated.
Definition: WorkingMemHandle.hpp:77
armnn::experimental::WorkingMemHandle::OutputMemDescriptorCoords
Definition: WorkingMemHandle.hpp:40
armnn::experimental::IWorkingMemHandle
Definition: IWorkingMemHandle.hpp:20
armnn::experimental::WorkingMemHandle::GetBindingIdVector
std::vector< LayerBindingId > & GetBindingIdVector()
Definition: WorkingMemHandle.hpp:119
armnn::experimental::WorkingMemHandle::OutputMemDescriptorCoords::m_OutputSlotCoords
std::pair< unsigned int, unsigned int > m_OutputSlotCoords
Definition: WorkingMemHandle.hpp:44
armnn::NetworkId
int NetworkId
Definition: IRuntime.hpp:35
armnn::experimental::WorkingMemHandle::GetNetworkId
NetworkId GetNetworkId() override
Returns the NetworkId of the Network that this IWorkingMemHandle works with.
Definition: WorkingMemHandle.hpp:64
armnn::experimental::WorkingMemHandle::GetInputHandle
ITensorHandle * GetInputHandle(LayerBindingId layerBindingId) const
Definition: WorkingMemHandle.hpp:97
IBackendInternal.hpp
armnn::experimental::WorkingMemHandle::GetInputConnections
const std::vector< std::vector< ITensorHandle * >::iterator > & GetInputConnections(LayerBindingId layerBindingId) const
Definition: WorkingMemHandle.hpp:107
armnn::LayerBindingId
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:309
armnn::experimental::WorkingMemHandle::InputMemDescriptorCoords::m_LayerBindingId
LayerBindingId m_LayerBindingId
Definition: WorkingMemHandle.hpp:35
WorkingMemDescriptor.hpp
armnn::experimental::WorkingMemHandle::MemSyncOutputs
void MemSyncOutputs()
Definition: WorkingMemHandle.cpp:136
armnn::experimental::WorkingMemHandle
Definition: WorkingMemHandle.hpp:29
armnn::experimental::WorkingMemHandle::InputMemDescriptorCoords
Definition: WorkingMemHandle.hpp:33
armnn::experimental::WorkingMemHandle::OutputMemDescriptorCoords::m_LayerBindingIds
std::vector< LayerBindingId > m_LayerBindingIds
Definition: WorkingMemHandle.hpp:42
armnn::experimental::WorkingMemHandle::OutputMemDescriptorCoords::m_InputSlotCoords
std::vector< std::pair< unsigned int, unsigned int > > m_InputSlotCoords
Definition: WorkingMemHandle.hpp:45
Tensor.hpp
MemoryManager.hpp
armnn::experimental::WorkingMemHandle::InputMemDescriptorCoords::m_InputSlotCoords
std::vector< std::pair< unsigned int, unsigned int > > m_InputSlotCoords
Definition: WorkingMemHandle.hpp:37
armnn::experimental::WorkingMemHandle::GetOutputHandle
ITensorHandle * GetOutputHandle(LayerBindingId layerBindingId) const
Definition: WorkingMemHandle.hpp:102
Network.hpp
armnn::MemorySource
MemorySource
Define the Memory Source to reduce copies.
Definition: Types.hpp:244
armnn::experimental::WorkingMemHandle::ValidateBindingIds
void ValidateBindingIds()
Definition: WorkingMemHandle.cpp:145
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::experimental::WorkingMemDescriptor
Definition: WorkingMemDescriptor.hpp:18
Layer.hpp
armnn::experimental::WorkingMemHandle::GetWorkingMemDescriptorAt
WorkingMemDescriptor & GetWorkingMemDescriptorAt(unsigned int id) override
Get the WorkingMemDescriptor at an index.
Definition: WorkingMemHandle.hpp:84
armnn::experimental::BackendPtrMap
std::unordered_map< BackendId, IBackendInternalUniquePtr > BackendPtrMap
Definition: WorkingMemHandle.hpp:27
armnn::experimental::WorkingMemHandle::Allocate
void Allocate() override
Allocate the backing memory required for execution.
Definition: WorkingMemHandle.cpp:100
armnn::experimental::WorkingMemHandle::WorkingMemHandle
WorkingMemHandle(NetworkId networkId)
Definition: WorkingMemHandle.hpp:48
IWorkingMemHandle.hpp