ArmNN
 25.02
LoadedNetwork Class Reference

#include <LoadedNetwork.hpp>

Public Types

using WorkloadQueue = std::vector< std::unique_ptr< IWorkload > >
 

Public Member Functions

 ~LoadedNetwork ()
 
TensorInfo GetInputTensorInfo (LayerBindingId layerId) const
 
TensorInfo GetOutputTensorInfo (LayerBindingId layerId) const
 
std::vector< ImportedInputId > ImportInputs (const InputTensors &inputTensors, MemorySource forceImportMemorySource=MemorySource::Undefined)
 
std::vector< ImportedOutputId > ImportOutputs (const OutputTensors &outputTensors, MemorySource forceImportMemorySource=MemorySource::Undefined)
 
void ClearImportedInputs (const std::vector< ImportedInputId > inputIds)
 
void ClearImportedOutputs (const std::vector< ImportedOutputId > outputIds)
 
Status EnqueueWorkload (const InputTensors &inputTensors, const OutputTensors &outputTensors, std::vector< ImportedInputId > preImportedInputIds={}, std::vector< ImportedOutputId > preImportedOutputIds={})
 Single thread execution of the loaded network.
 
const std::shared_ptr< IProfiler > & GetProfiler () const
 
void FreeWorkingMemory ()
 
void RegisterDebugCallback (const DebugCallbackFunction &func)
 
void SendNetworkStructure (arm::pipe::IProfilingService &profilingService)
 
arm::pipe::ProfilingGuid GetNetworkGuid ()
 

Static Public Member Functions

static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork (std::unique_ptr< IOptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties, arm::pipe::IProfilingService *profilingService)
 

Detailed Description

Definition at line 42 of file LoadedNetwork.hpp.

Member Typedef Documentation

◆ WorkloadQueue

using WorkloadQueue = std::vector<std::unique_ptr<IWorkload> >

Definition at line 45 of file LoadedNetwork.hpp.

Constructor & Destructor Documentation

◆ ~LoadedNetwork()

~LoadedNetwork ( )
inline

Definition at line 47 of file LoadedNetwork.hpp.

{
    FreeWorkingMemory();
}

References LoadedNetwork::FreeWorkingMemory().

Member Function Documentation

◆ ClearImportedInputs()

void ClearImportedInputs ( const std::vector< ImportedInputId >  inputIds)

Definition at line 1525 of file LoadedNetwork.cpp.

{
    for (auto id : inputIds)
    {
        if (id > m_PreImportedInputHandles.size())
        {
            throw InvalidArgumentException(fmt::format("ClearImportedInputs::Unknown ImportedInputId: {}", id));
        }

        auto& importedTensorHandle = m_PreImportedInputHandles[id].m_TensorHandle;
        if (!importedTensorHandle)
        {
            throw InvalidArgumentException(
                fmt::format("ClearImportedInputs::ImportedInput with id: {} has already been deleted", id));
        }
        // Call Unimport then destroy the tensorHandle
        importedTensorHandle->Unimport();
        importedTensorHandle = {};
    }
}

Referenced by RuntimeImpl::ClearImportedInputs().
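Usage sketch (public API): imported ids returned by ImportInputs() stay valid until they are cleared; clearing unimports the external memory and destroys the handle. This assumes a "runtime", "networkId" and "inputTensors" prepared as in the EnqueueWorkload() example below.

std::vector<armnn::ImportedInputId> importedIds =
    runtime->ImportInputs(networkId, inputTensors, armnn::MemorySource::Malloc);

// ... run inferences that pass importedIds to EnqueueWorkload() ...

// Unimport and destroy the imported handles once they are no longer needed.
runtime->ClearImportedInputs(networkId, importedIds);
// ClearImportedOutputs() follows the same pattern for ImportOutputs() ids.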

◆ ClearImportedOutputs()

void ClearImportedOutputs ( const std::vector< ImportedOutputId >  outputIds)

Definition at line 1546 of file LoadedNetwork.cpp.

{
    for (auto id : outputIds)
    {
        if (id > m_PreImportedOutputHandles.size())
        {
            throw InvalidArgumentException(fmt::format("ClearImportedOutputs::Unknown ImportedOutputId: {}", id));
        }

        auto& importedTensorHandle = m_PreImportedOutputHandles[id].m_TensorHandle;
        if (!importedTensorHandle)
        {
            throw InvalidArgumentException(
                fmt::format("ClearImportedOutputs::ImportedOutput with id: {} has already been deleted", id));
        }
        // Call Unimport then destroy the tensorHandle
        importedTensorHandle->Unimport();
        importedTensorHandle = {};
    }
}

Referenced by RuntimeImpl::ClearImportedOutputs().

◆ EnqueueWorkload()

Status EnqueueWorkload ( const InputTensors &  inputTensors,
const OutputTensors &  outputTensors,
std::vector< ImportedInputId >  preImportedInputIds = {},
std::vector< ImportedOutputId >  preImportedOutputIds = {} 
)

Single thread execution of the loaded network.

Definition at line 783 of file LoadedNetwork.cpp.

{
    const Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();

    // Walk graph to determine the order of execution.
    if (graph.GetNumLayers() < 2)
    {
        ARMNN_LOG(warning) << "IRuntime::EnqueueWorkload()::Less than two nodes in graph";
        return Status::Failure;
    }

    // Data that must be kept alive for the entire execution of the workload.
    WorkloadData workloadData(inputTensors, outputTensors);

    // Input tensors can be provided as parameters or pre imported. Either way the number of
    // tensors should match the number of inputs.
    if (graph.GetNumInputs() != (inputTensors.size() + preImportedInputIds.size()))
    {
        throw InvalidArgumentException("Number of inputs provided does not match network.");
    }

    // For each input to the network, call EnqueueInput with the data passed by the user.
    {
        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareInputs");
        m_InputQueue.clear();
        m_InputQueue.reserve(graph.GetNumInputs());

        unsigned int inputIndex = 0;
        unsigned int importedInputIdIndex = 0;
        std::sort(preImportedInputIds.begin(), preImportedInputIds.end());
        for (const BindableLayer* inputLayer : graph.GetInputLayers())
        {
            if (importedInputIdIndex < preImportedInputIds.size() &&
                inputIndex == preImportedInputIds[importedInputIdIndex])
            {
                // Only replace tensorhandles if they have not already been replaced
                if (!m_IsInputImported[inputIndex])
                {
                    auto outputTensorHandle = m_PreImportedInputHandles[inputIndex].m_TensorHandle.get();

                    for (const auto& workloadInfo: m_InputWorkloadSlotPairs[inputLayer->GetBindingId()])
                    {
                        auto workload = m_WorkloadQueue[workloadInfo.m_WorkloadIndex].get();
                        workload->ReplaceInputTensorHandle(outputTensorHandle, workloadInfo.m_SlotIndex);
                    }
                    m_IsInputImported[inputIndex] = true;
                }
                importedInputIdIndex++;
            }
            else
            {
                if (m_IsInputImported[inputIndex])
                {
                    OutputHandler& handler = const_cast<OutputHandler&>(inputLayer->GetOutputHandler(0));

                    for (const auto& workloadInfo: m_InputWorkloadSlotPairs[inputLayer->GetBindingId()])
                    {
                        auto workload = m_WorkloadQueue[workloadInfo.m_WorkloadIndex].get();
                        workload->ReplaceInputTensorHandle(handler.GetData(), workloadInfo.m_SlotIndex);
                    }

                    m_IsInputImported[inputIndex] = false;
                }

                // InputTensorHandle is not imported yet, process to enqueue input
                const TensorPin& pin = workloadData.GetInputTensorPin(inputLayer->GetBindingId());
                EnqueueInput(*inputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
            }
            inputIndex++;
        }
    }
    // For each output to the network, call EnqueueOutput with the data passed by the user.
    {
        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "PrepareOutputs");
        m_OutputQueue.clear();
        m_OutputQueue.reserve(graph.GetNumOutputs());

        if (preImportedOutputIds.size() > graph.GetNumOutputs())
        {
            throw InvalidArgumentException("Invalid number of preImportedOutputIds");
        }

        unsigned int outputIndex = 0;
        unsigned int importedOutputIdIndex = 0;
        std::sort(preImportedOutputIds.begin(), preImportedOutputIds.end());
        for (const BindableLayer* outputLayer : graph.GetOutputLayers())
        {
            if (importedOutputIdIndex < preImportedOutputIds.size() &&
                outputIndex == preImportedOutputIds[importedOutputIdIndex])
            {
                // Only replace tensorhandles if they have not already been replaced
                ITensorHandle* inputTensorHandle = m_PreImportedOutputHandles[outputIndex].m_TensorHandle.get();

                if (!m_IsOutputImported[outputIndex])
                {
                    const auto bindingId = outputLayer->GetBindingId();
                    const auto& indices = m_OutputWorkloadSlotPairs[bindingId];

                    auto outputWorkload = m_WorkloadQueue[indices.m_OutputSlotIndices.m_WorkloadIndex].get();

                    outputWorkload->ReplaceOutputTensorHandle(inputTensorHandle,
                                                              indices.m_OutputSlotIndices.m_SlotIndex);

                    for (const auto& workloadInfo: indices.m_InputSlotIndices)
                    {
                        auto inputWorkload = m_WorkloadQueue[workloadInfo.m_WorkloadIndex].get();
                        inputWorkload->ReplaceInputTensorHandle(inputTensorHandle, workloadInfo.m_SlotIndex);
                    }
                    m_IsOutputImported[outputIndex] = true;
                }

                if (!inputTensorHandle)
                {
                    throw armnn::NullPointerException("Data should have been allocated.");
                }

                MemSyncQueueDescriptor syncDesc;
                syncDesc.m_Inputs.push_back(inputTensorHandle);
                WorkloadInfo info;
                info.m_InputTensorInfos.push_back(outputLayer->GetInputSlot(0).GetTensorInfo());

                auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
                if (!syncWorkload)
                {
                    throw armnn::NullPointerException("No sync workload created");
                }

                m_OutputQueue.push_back(std::move(syncWorkload));
                importedOutputIdIndex++;
            }
            else
            {
                if (m_IsOutputImported[outputIndex])
                {
                    const auto bindingId = outputLayer->GetBindingId();
                    const auto& indices = m_OutputWorkloadSlotPairs[bindingId];

                    auto outputWorkload = m_WorkloadQueue[indices.m_OutputSlotIndices.m_WorkloadIndex].get();
                    const OutputHandler& outputHandler =
                        outputLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOutputHandler();

                    outputWorkload->ReplaceOutputTensorHandle(
                        outputHandler.GetData(), indices.m_OutputSlotIndices.m_SlotIndex);

                    for (const auto& workloadInfo: indices.m_InputSlotIndices)
                    {
                        auto inputWorkload = m_WorkloadQueue[workloadInfo.m_WorkloadIndex].get();
                        inputWorkload->ReplaceInputTensorHandle(outputHandler.GetData(), workloadInfo.m_SlotIndex);
                    }
                    m_IsOutputImported[outputIndex] = false;
                }

                const TensorPin& pin = workloadData.GetOutputTensorPin(outputLayer->GetBindingId());
                // OutputTensorHandle is not imported yet, process to enqueue Output
                EnqueueOutput(*outputLayer, pin.GetTensorHandle(), pin.GetTensorInfo());
            }
            outputIndex++;
        }
    }

    std::unique_ptr<TimelineUtilityMethods> timelineUtils =
        TimelineUtilityMethods::GetTimelineUtils(*m_ProfilingService);
    ProfilingGuid inferenceGuid = m_ProfilingService->GetNextGuid();
    if (timelineUtils)
    {
        // Add inference timeline trace if profiling is enabled.
        ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
        timelineUtils->CreateTypedEntity(inferenceGuid, LabelsAndEventClasses::INFERENCE_GUID);
        timelineUtils->CreateRelationship(ProfilingRelationshipType::RetentionLink,
                                          networkGuid,
                                          inferenceGuid,
                                          LabelsAndEventClasses::EXECUTION_OF_GUID);
        timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS);
    }

    bool executionSucceeded = true;

    {
        if (m_ProfilingService->IsProfilingEnabled())
        {
            m_ProfilingService->IncrementCounterValue(INFERENCES_RUN);
        }
        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute");
        ARMNN_SCOPED_HEAP_PROFILING("Executing");
        executionSucceeded = Execute(timelineUtils, inferenceGuid);
    }

    if (timelineUtils)
    {
        // Add end of life of the inference timeline if profiling is enabled.
        timelineUtils->RecordEvent(inferenceGuid, LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
        timelineUtils->Commit();
    }

    return executionSucceeded ? Status::Success : Status::Failure;
}

References ARMNN_LOG, ARMNN_SCOPED_PROFILING_EVENT, armnn::Failure, OutputHandler::GetData(), Graph::GetInputLayers(), Graph::GetNumInputs(), Graph::GetNumLayers(), Graph::GetNumOutputs(), Graph::GetOutputLayers(), armnn::info, QueueDescriptor::m_Inputs, armnn::Undefined, and armnn::warning.

Referenced by RuntimeImpl::EnqueueWorkload().
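LoadedNetwork is an internal class; applications normally reach EnqueueWorkload() through IRuntime. The following is a minimal sketch of that path, assuming "network" is an already-built armnn::INetworkPtr (model construction elided) and that both the input and the output use binding id 0.

#include <armnn/ArmNN.hpp>

armnn::IRuntime::CreationOptions options;
armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network,
                                                     {armnn::Compute::CpuRef},
                                                     runtime->GetDeviceSpec());
armnn::NetworkId networkId;
runtime->LoadNetwork(networkId, std::move(optNet));

// Size user-owned buffers from the bound tensor infos.
armnn::TensorInfo inputInfo = runtime->GetInputTensorInfo(networkId, 0);
inputInfo.SetConstant(true); // ConstTensor requires a constant TensorInfo
armnn::TensorInfo outputInfo = runtime->GetOutputTensorInfo(networkId, 0);
std::vector<float> inputData(inputInfo.GetNumElements());
std::vector<float> outputData(outputInfo.GetNumElements());

armnn::InputTensors inputTensors { { 0, armnn::ConstTensor(inputInfo, inputData.data()) } };
armnn::OutputTensors outputTensors { { 0, armnn::Tensor(outputInfo, outputData.data()) } };

// Single-threaded, synchronous execution; forwards to LoadedNetwork::EnqueueWorkload().
armnn::Status status = runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);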

◆ FreeWorkingMemory()

void FreeWorkingMemory ( )

Definition at line 1197 of file LoadedNetwork.cpp.

{
#if !defined(ARMNN_DISABLE_THREADS)
    std::lock_guard<std::mutex> lockGuard(m_WorkingMemMutex);
#endif

    if (!m_IsWorkingMemAllocated)
    {
        return;
    }

    if (m_ExternalMemoryManager)
    {
        m_ExternalMemoryManager->Deallocate();
    }

    // Informs the memory managers to release memory in its respective memory group
    for (auto&& memoryManager : m_BackendMemoryMangers)
    {
        if (memoryManager)
        {
            memoryManager->Release();
        }
    }
    m_TensorHandleFactoryRegistry.ReleaseMemory();
    m_IsWorkingMemAllocated = false;
}

References TensorHandleFactoryRegistry::ReleaseMemory().

Referenced by RuntimeImpl::EnqueueWorkload(), and LoadedNetwork::~LoadedNetwork().
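For code holding the internal LoadedNetwork directly (see MakeLoadedNetwork() below), a brief sketch of releasing working memory between inferences; the tensors are assumed to be prepared as in the EnqueueWorkload() example above. RuntimeImpl does the same after each synchronous run.

loadedNetwork->EnqueueWorkload(inputTensors, outputTensors);
loadedNetwork->FreeWorkingMemory(); // intermediate buffers are reallocated on the next run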

◆ GetInputTensorInfo()

TensorInfo GetInputTensorInfo ( LayerBindingId  layerId) const

Definition at line 626 of file LoadedNetwork.cpp.

{
    for (auto&& inputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetInputLayers())
    {
        if (inputLayer->GetNumOutputSlots() != 1)
        {
            throw armnn::GraphValidationException("Input layer should have exactly 1 output slot");
        }

        if (inputLayer->GetBindingId() == layerId)
        {
            return inputLayer->GetOutputSlot(0).GetTensorInfo();
        }
    }

    throw InvalidArgumentException(fmt::format("No input layer is associated with id {}", layerId));
}

Referenced by RuntimeImpl::GetInputTensorInfo().

◆ GetNetworkGuid()

ProfilingGuid GetNetworkGuid ( )

Definition at line 621 of file LoadedNetwork.cpp.

{
    return m_OptimizedNetwork->GetGuid();
}

◆ GetOutputTensorInfo()

TensorInfo GetOutputTensorInfo ( LayerBindingId  layerId) const

Definition at line 644 of file LoadedNetwork.cpp.

{
    for (auto&& outputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetOutputLayers())
    {
        if (outputLayer->GetNumInputSlots() != 1)
        {
            throw armnn::GraphValidationException("Output layer should have exactly 1 input slot");
        }

        if (!outputLayer->GetInputSlot(0).GetConnection())
        {
            throw armnn::GraphValidationException("Input slot on Output layer must be connected");
        }

        if (outputLayer->GetBindingId() == layerId)
        {
            return outputLayer->GetInputSlot(0).GetTensorInfo();
        }
    }

    throw InvalidArgumentException(fmt::format("No output layer is associated with id {}", layerId));
}

Referenced by RuntimeImpl::GetOutputTensorInfo().
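A small sketch of both getters through the public runtime API, assuming "runtime" and "networkId" from the EnqueueWorkload() example above; binding id 0 is an assumption for illustration.

const armnn::TensorInfo inputInfo  = runtime->GetInputTensorInfo(networkId, 0);
const armnn::TensorInfo outputInfo = runtime->GetOutputTensorInfo(networkId, 0);

// Allocate correctly sized user buffers before binding them.
std::vector<float> inputBuffer(inputInfo.GetNumElements());
std::vector<float> outputBuffer(outputInfo.GetNumElements());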

◆ GetProfiler()

const std::shared_ptr<IProfiler>& GetProfiler ( ) const
inline

Definition at line 76 of file LoadedNetwork.hpp.

{ return m_OptimizedNetwork->GetProfiler(); }

Referenced by RuntimeImpl::EnqueueWorkload().
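Usage sketch, assuming "runtime", "networkId" and the tensors from the EnqueueWorkload() example above: enable the profiler before running, then print the collected events.

std::shared_ptr<armnn::IProfiler> profiler = runtime->GetProfiler(networkId);
profiler->EnableProfiling(true);

runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);

profiler->Print(std::cout); // requires <iostream>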

◆ ImportInputs()

std::vector< ImportedInputId > ImportInputs ( const InputTensors &  inputTensors,
MemorySource  forceImportMemorySource = MemorySource::Undefined 
)

Definition at line 1402 of file LoadedNetwork.cpp.

{
    // Cannot import if import is not enabled and forceImportMemorySource is undefined
    if (forceImportMemorySource == MemorySource::Undefined)
    {
        throw MemoryImportException("ImportInputs: Memory Import failed, NetworkProperties.m_ImportEnabled");
    }
    // The number of pre imported tensors should not exceed the number of inputs.
    if (inputTensors.size() > m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetNumInputs())
    {
        throw MemoryImportException("ImportInputs: The number of tensors provided exceeds the number of inputs.");
    }

    std::vector<ImportedInputId> importedInputs;
    Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
    unsigned int inputIndex = 0;
    for (const BindableLayer* inputLayer : graph.GetInputLayers())
    {
        auto outputTensorHandle = m_PreImportedInputHandles[inputIndex].m_TensorHandle.get();

        if (!outputTensorHandle)
        {
            inputIndex++;
            continue;
        }

        auto layerBindingId = inputLayer->GetBindingId();
        auto it = std::find_if(inputTensors.begin(), inputTensors.end(), [=](const auto& inputTensor)
        {
            return inputTensor.first == layerBindingId;
        });

        if (it == inputTensors.end())
        {
            inputIndex++;
            continue;
        }

        const auto& inputTensor = *it;
        std::unique_ptr<ITensorHandle> passThroughTensorHandle =
            std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
                                                           inputTensor.second.GetMemoryArea());

        try
        {
            if (outputTensorHandle->CanBeImported(passThroughTensorHandle->Map(), forceImportMemorySource)
                && (outputTensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource)))
            {
                importedInputs.push_back(inputIndex);
            }
            passThroughTensorHandle->Unmap();
        }
        catch(const MemoryImportException& exception)
        {
            ARMNN_LOG(error) << "An error occurred attempting to import input_"
                             << inputIndex << " : " << exception.what();
            passThroughTensorHandle->Unmap();
        }
        inputIndex++;
    }

    return importedInputs;
}

References ARMNN_LOG, armnn::error, Graph::GetInputLayers(), armnn::Undefined, and Exception::what().

Referenced by RuntimeImpl::ImportInputs().
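A hedged sketch of the zero-copy input path, under the same assumptions as the EnqueueWorkload() example above and with input buffers that satisfy the backend's alignment requirements. Import degrades gracefully: ids are only returned for tensors that could actually be imported.

std::vector<armnn::ImportedInputId> importedInputIds =
    runtime->ImportInputs(networkId, inputTensors, armnn::MemorySource::Malloc);

// Pre-imported inputs are referenced by id rather than passed as tensors; the
// combined count must still equal the network's number of inputs.
armnn::Status status = runtime->EnqueueWorkload(networkId,
                                                {},           // no regular inputs
                                                outputTensors,
                                                importedInputIds);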

◆ ImportOutputs()

std::vector< ImportedOutputId > ImportOutputs ( const OutputTensors &  outputTensors,
MemorySource  forceImportMemorySource = MemorySource::Undefined 
)

Definition at line 1467 of file LoadedNetwork.cpp.

{
    // Cannot import if import is not enabled and forceImportMemorySource is undefined
    if (forceImportMemorySource == MemorySource::Undefined)
    {
        throw MemoryImportException("ImportOutputs: Memory Import failed, NetworkProperties.m_ImportEnabled");
    }
    // If forceImportMemorySource is defined, try import if memory is aligned
    if (outputTensors.size() != m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetNumOutputs())
    {
        throw MemoryImportException("ImportOutputs: Force Import failed, incorrect number of tensors");
    }
    std::vector<ImportedOutputId> importedOutputs;
    Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();

    unsigned int outputIndex = 0;
    for (const BindableLayer* const outputLayer : graph.GetOutputLayers())
    {
        auto inputTensorHandle = m_PreImportedOutputHandles[outputIndex].m_TensorHandle.get();
        if (!inputTensorHandle)
        {
            outputIndex++;
            continue;
        }

        auto layerBindingId = outputLayer->GetBindingId();
        auto it = std::find_if(outputTensors.begin(), outputTensors.end(), [=] (const auto& outputTensor)
        {
            return outputTensor.first == layerBindingId;
        });

        if (it == outputTensors.end())
        {
            outputIndex++;
            continue;
        }

        const auto outputTensor = *it;
        try
        {
            // Check if the output memory can be imported
            if (inputTensorHandle->CanBeImported(outputTensor.second.GetMemoryArea(), forceImportMemorySource)
                && inputTensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
            {
                importedOutputs.push_back(outputIndex);
            }
        }
        catch(const MemoryImportException& exception)
        {
            ARMNN_LOG(error) << "An error occurred attempting to import output_"
                             << outputIndex << " : " << exception.what();
        }
        outputIndex++;
    }
    return importedOutputs;
}

References ARMNN_LOG, armnn::error, Graph::GetOutputLayers(), armnn::Undefined, and Exception::what().

Referenced by RuntimeImpl::ImportOutputs().
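A mirror sketch for outputs, under the same assumptions as the ImportInputs() example above. Unlike ImportInputs(), this call requires a tensor for every output of the network.

std::vector<armnn::ImportedOutputId> importedOutputIds =
    runtime->ImportOutputs(networkId, outputTensors, armnn::MemorySource::Malloc);

armnn::Status status = runtime->EnqueueWorkload(networkId,
                                                inputTensors,
                                                {},               // no regular outputs
                                                {},               // no pre-imported inputs
                                                importedOutputIds);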

◆ MakeLoadedNetwork()

std::unique_ptr< LoadedNetwork > MakeLoadedNetwork ( std::unique_ptr< IOptimizedNetwork >  net,
std::string &  errorMessage,
const INetworkProperties &  networkProperties,
arm::pipe::IProfilingService *  profilingService 
)
static

Definition at line 170 of file LoadedNetwork.cpp.

{
    std::unique_ptr<LoadedNetwork> loadedNetwork;

    auto Fail = [&](const std::exception& error) -> std::unique_ptr<LoadedNetwork>
    {
        errorMessage = ToErrorMessage("An error occurred when preparing the network workloads: ", error);
        ARMNN_LOG(error) << errorMessage;

        return std::unique_ptr<LoadedNetwork>();
    };

    try
    {
        loadedNetwork.reset(new LoadedNetwork(std::move(net), networkProperties, profilingService));
    }
    catch (const armnn::RuntimeException& error)
    {
        return Fail(error);
    }
    catch (const armnn::Exception& error)
    {
        return Fail(error);
    }
    catch (const std::runtime_error& error)
    {
        return Fail(error);
    }

    return loadedNetwork;
}

References ARMNN_LOG, and armnn::error.

Referenced by RuntimeImpl::LoadNetwork().
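A minimal sketch of calling the factory directly. This is internal API (the header lives under src/armnn rather than the public include tree), and the INetworkProperties constructor arguments vary between releases, so the properties instance is taken as a parameter here.

std::unique_ptr<armnn::LoadedNetwork> Load(std::unique_ptr<armnn::IOptimizedNetwork> net,
                                           const armnn::INetworkProperties& properties,
                                           arm::pipe::IProfilingService* profilingService)
{
    std::string errorMessage;
    auto loaded = armnn::LoadedNetwork::MakeLoadedNetwork(std::move(net), errorMessage,
                                                          properties, profilingService);
    if (!loaded)
    {
        // On failure a null pointer is returned and errorMessage explains why.
        ARMNN_LOG(error) << errorMessage;
    }
    return loaded;
}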

◆ RegisterDebugCallback()

void RegisterDebugCallback ( const DebugCallbackFunction &  func)

Definition at line 1567 of file LoadedNetwork.cpp.

{
    for (auto&& workloadPtr: m_WorkloadQueue)
    {
        workloadPtr.get()->RegisterDebugCallback(func);
    }
}

Referenced by RuntimeImpl::RegisterDebugCallback().
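Usage sketch via the public runtime API, which forwards the callback to every workload in the queue. The callback only fires for layers executed with debugging enabled in the optimizer options; the cast assumes LayerGuid converts to uint64_t.

armnn::DebugCallbackFunction callback =
    [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensorHandle)
    {
        std::cout << "layer " << static_cast<uint64_t>(guid)
                  << " slot " << slotIndex
                  << " elements " << tensorHandle->GetShape().GetNumElements() << "\n";
    };
runtime->RegisterDebugCallback(networkId, callback);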

◆ SendNetworkStructure()

void SendNetworkStructure ( arm::pipe::IProfilingService &  profilingService)

Definition at line 583 of file LoadedNetwork.cpp.

{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_SendNetworkStructure");
    Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
    ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();

    std::unique_ptr<TimelineUtilityMethods> timelineUtils =
        TimelineUtilityMethods::GetTimelineUtils(profilingService);

    timelineUtils->CreateTypedEntity(networkGuid, LabelsAndEventClasses::NETWORK_GUID);

    for (auto&& layer : order)
    {
        // Add layer to the post-optimisation network structure
        AddLayerStructure(timelineUtils, *layer, networkGuid);
        switch (layer->GetType())
        {
            case LayerType::Input:
            case LayerType::Output:
            {
                // Inputs and outputs are treated in a special way - see EnqueueInput() and EnqueueOutput().
                break;
            }
            default:
            {
                for (auto& workload : m_WorkloadQueue)
                {
                    // Add workload to the post-optimisation network structure
                    AddWorkloadStructure(timelineUtils, workload, *layer);
                }
                break;
            }
        }
    }
    // Commit to send the post-optimisation network structure
    timelineUtils->Commit();
}

References ARMNN_SCOPED_PROFILING_EVENT, armnn::Input, armnn::Output, and armnn::Undefined.


The documentation for this class was generated from the following files:

src/armnn/LoadedNetwork.hpp
src/armnn/LoadedNetwork.cpp