10 #include <fmt/format.h>
15 namespace experimental
// NOTE(review): this chunk is an elided extraction — original file line numbers
// are embedded in each line and several lines (including the constructor's
// name/opening line and most braces) are missing, so the code is annotated in
// place rather than rewritten.
//
// Constructor fragment: wires up the per-inference working-memory state.
// Heavy resources are taken by value and std::move'd into members; the
// descriptor/coordinate vectors are consumed to build lookup maps keyed by
// LayerBindingId so tensors can be resolved/swapped per inference.
19 std::vector<InputMemDescriptorCoords> inputLayerInfo,
20 std::vector<OutputMemDescriptorCoords> outputLayerInfo,
21 std::vector<WorkingMemDescriptor> workingMemDescriptors,
22 std::unique_ptr<MemoryManager> memoryManager,
// Pairs each pre-reserved TensorMemory block with the MemorySource it will be
// imported from (consumed by the Import() loop in the allocation code below).
23 std::vector<std::pair<std::shared_ptr<TensorMemory>,
MemorySource>> tensorMemory,
24 std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles,
25 std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles,
26 std::vector<std::pair<BackendId, ExecutionData>> executionDataVec,
// Member-init list: unique_ptr/handle vectors are moved; small or shared
// members (ids, descriptors, backend pointer) are copied.
28 : m_NetworkId(networkId)
29 , m_WorkingMemDescriptors(workingMemDescriptors)
30 , m_MemoryManager(
std::move(memoryManager))
31 , m_TensorMemory(
std::move(tensorMemory))
32 , m_ManagedTensorHandles(
std::move(managedTensorHandles))
33 , m_UnmanagedTensorHandles(
std::move(unmanagedTensorHandles))
// Input count cached as a signed difference type so it can be used directly
// as an iterator offset (see the begin() + m_InputSize uses in validation).
34 , m_InputSize(
numeric_cast<DifferenceType>(inputLayerInfo.size()))
35 , m_IsAllocated(false)
36 , m_ExecutionDataVec(executionDataVec)
37 , m_Backends(backends)
// For every network input: seed its validation flag, resolve the
// ITensorHandle backing its first slot, and record an iterator to every
// consuming slot so the handle at that position can be overwritten later.
39 for (
const auto& inputInfo : inputLayerInfo)
41 m_InputValidationMap[inputInfo.m_LayerBindingId] =
false;
// Coord pair convention (from the indexing below): .first selects the
// working-mem descriptor, .second the slot index within its m_Inputs.
44 auto memDesc = m_WorkingMemDescriptors.at(inputInfo.m_InputSlotCoords[0].first);
45 ITensorHandle* inputTensorHandle = memDesc.m_Inputs[inputInfo.m_InputSlotCoords[0].second];
46 m_InputHandleMap[inputInfo.m_LayerBindingId] = inputTensorHandle;
50 for (
auto inputSlot : inputInfo.m_InputSlotCoords)
// NOTE(review): `workingMemDescriptor` is declared in elided lines —
// presumably a reference obtained via inputSlot.first; confirm upstream.
54 auto inputPos = workingMemDescriptor.
m_Inputs.begin();
58 inputPos += numeric_cast<DifferenceType>(inputSlot.second);
59 m_InputConnectionMap[inputInfo.m_LayerBindingId].push_back(inputPos);
// Outputs: one output slot may serve several binding ids. Every id gets a
// validation flag and a handle entry; bindingIdCount accumulates the total
// input + output id count used to size m_BindingIdVec at the end.
62 size_t bindingIdCount = inputLayerInfo.size();
63 for (
const auto& outputInfo : outputLayerInfo)
65 for (
auto bindingId : outputInfo.m_LayerBindingIds)
67 m_OutputValidationMap[bindingId] =
false;
// Resolve the producing slot's handle for this binding id.
70 auto outputPos = m_WorkingMemDescriptors.at(outputInfo.m_OutputSlotCoords.first).m_Outputs.begin();
71 outputPos += numeric_cast<DifferenceType>(outputInfo.m_OutputSlotCoords.second);
73 m_OutputHandleMap[bindingId] = *outputPos;
75 bindingIdCount += outputInfo.m_LayerBindingIds.size();
// Connection-map bookkeeping below only keys on m_LayerBindingIds[0], so the
// multi-binding case presumably takes a different (elided) path here —
// TODO confirm against the missing branch body.
78 if (outputInfo.m_LayerBindingIds.size() != 1)
84 for (
auto outputSlot : outputInfo.m_InputSlotCoords)
88 auto inputPos = workingMemDescriptor.
m_Inputs.begin();
92 inputPos += numeric_cast<DifferenceType>(outputSlot.second);
93 m_OutputConnectionMap[outputInfo.m_LayerBindingIds[0]].push_back(inputPos);
// Scratch vector sized for every input + output binding id; presumably filled
// by the caller before the validation routine runs — TODO confirm.
96 m_BindingIdVec = std::vector<LayerBindingId>(bindingIdCount);
// Allocation fragment (function header elided): marks the handle allocated,
// delegates buffer allocation to the MemoryManager, then imports each
// pre-reserved TensorMemory block into its managed tensor handle and
// refreshes the per-backend ExecutionData entries.
106 m_IsAllocated =
true;
108 m_MemoryManager->Allocate();
// Bind each managed handle to its reserved memory; m_TensorMemory[i].second
// is the MemorySource describing how the pointer may be used by the backend.
// Relies on m_ManagedTensorHandles and m_TensorMemory being index-aligned.
110 for (
unsigned int i = 0; i < m_TensorMemory.size(); ++i)
112 m_ManagedTensorHandles[i]->Import(m_TensorMemory[i].first->m_Data, m_TensorMemory[i].second);
// Rebuild ExecutionData per backend now that memory is in place.
// NOTE(review): the call that produces `executionData` (presumably via
// `backend`) sits on elided lines — confirm before relying on this.
116 for (
unsigned int i = 0; i < m_ExecutionDataVec.size(); ++i)
118 auto& backend = m_Backends->at(m_ExecutionDataVec[i].first);
121 m_ExecutionDataVec[i].second = executionData;
// Free fragment (function header elided): clears the allocated flag and
// returns the working memory to the MemoryManager. Mirrors the allocation
// fragment above; any early-out guard would be on elided lines.
131 m_IsAllocated =
false;
133 m_MemoryManager->Deallocate();
// Output-sync fragment: performs a Map(true)/Unmap round-trip on the first
// recorded connection of each output binding. The mapped pointer is discarded
// — presumably the blocking Map is used purely to synchronize pending device
// writes so output data is visible to the host; TODO confirm Map(true)
// semantics against the ITensorHandle contract.
// NOTE(review): `output` copies the map's key/value pair each iteration; the
// (elided) value type may make `const auto&` preferable.
138 for (
auto output : m_OutputConnectionMap)
140 (*output.second[0])->
Map(
true);
141 (*output.second[0])->
Unmap();
// Validation fragment: checks m_BindingIdVec for duplicate or unknown binding
// ids. Layout convention: the first m_InputSize entries are input ids, the
// remainder output ids. Each id is "checked off" in its validation map;
// .at(id) throwing std::out_of_range signals an unknown id, and (per the
// throws below) a flag already set signals a duplicate. The reset lambdas
// restore the maps to a clean state on every exit path so the handle can be
// validated again later.
147 auto resetInputValidationMap = [&]()
149 for (
auto& pair: m_InputValidationMap)
155 auto resetOutputValidationMap = [&]()
157 for (
auto& pair: m_OutputValidationMap)
// Pass 1: input ids — [begin, begin + m_InputSize).
163 std::for_each(m_BindingIdVec.begin(), m_BindingIdVec.begin() + m_InputSize, [&](
LayerBindingId id)
167 bool& isUsed = m_InputValidationMap.at(id);
// Duplicate input id: reset state before reporting, so the throw leaves
// the maps reusable.
170 resetInputValidationMap();
171 throw InvalidArgumentException(fmt::format(
"Duplicate Input LayerBindingId: {}", id));
// Unknown input id: .at() threw; reset and presumably rethrow a
// friendlier error on the elided lines — TODO confirm.
175 catch (
const std::out_of_range&)
177 resetInputValidationMap();
181 resetInputValidationMap();
// Pass 2: output ids — [begin + m_InputSize, end), same duplicate/unknown
// handling against m_OutputValidationMap.
183 std::for_each(m_BindingIdVec.begin() + m_InputSize, m_BindingIdVec.end(), [&](
LayerBindingId id)
187 bool& isUsed = m_OutputValidationMap.at(id);
190 resetOutputValidationMap();
191 throw InvalidArgumentException(fmt::format(
"Duplicate Output LayerBindingId: {}", id));
195 catch (
const std::out_of_range&)
197 resetOutputValidationMap();
// Final reset on the success path.
201 resetOutputValidationMap();