ArmNN
 25.11
Loading...
Searching...
No Matches
Runtime.cpp
Go to the documentation of this file.
1//
2// Copyright © 2017, 2022-2024 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
7#include "Runtime.hpp"
8
10
11#include <armnn/Version.hpp>
14#include <armnn/Logging.hpp>
15
17
19
22
23#if !defined(ARMNN_DISABLE_DYNAMIC_BACKENDS)
25#endif
26
28
29#include <client/include/backends/IBackendProfiling.hpp>
30
31#include <common/include/LabelsAndEventClasses.hpp>
32
33#include <iostream>
34
35
36using namespace armnn;
37using namespace std;
38
39namespace armnn
40{
42
44
// Defaulted out-of-line: the destructor must be emitted in this TU, where
// RuntimeImpl is a complete type, so unique_ptr<RuntimeImpl> can delete it
// (standard pimpl idiom — pRuntimeImpl is declared in IRuntime.hpp).
IRuntime::~IRuntime() = default;
46
48{
49 return new IRuntime(options);
50}
51
56
58{
59 delete runtime;
60}
61
63{
64 return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network));
65}
66
69 std::string& errorMessage)
70{
71 return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage);
72}
73
76 std::string& errorMessage,
77 const INetworkProperties& networkProperties)
78{
79 return pRuntimeImpl->LoadNetwork(networkIdOut, std::move(network), errorMessage, networkProperties);
80}
81
83{
84 return pRuntimeImpl->GetInputTensorInfo(networkId, layerId);
85}
86
88{
89 return pRuntimeImpl->GetOutputTensorInfo(networkId, layerId);
90}
91
92std::vector<ImportedInputId> IRuntime::ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
93 MemorySource forceImportMemorySource)
94{
95 return pRuntimeImpl->ImportInputs(networkId, inputTensors, forceImportMemorySource);
96}
97
98std::vector<ImportedOutputId> IRuntime::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
99 MemorySource forceImportMemorySource)
100{
101 return pRuntimeImpl->ImportOutputs(networkId, outputTensors, forceImportMemorySource);
102}
103
105 const InputTensors& inputTensors,
106 const OutputTensors& outputTensors,
107 std::vector<ImportedInputId> preImportedInputIds,
108 std::vector<ImportedOutputId> preImportedOutputIds)
109{
110 return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors,
111 preImportedInputIds, preImportedOutputIds);
112}
113
115{
116 return pRuntimeImpl->UnloadNetwork(networkId);
117}
118
120{
121 return pRuntimeImpl->GetDeviceSpec();
122}
123
124const std::shared_ptr<IProfiler> IRuntime::GetProfiler(NetworkId networkId) const
125{
126 return pRuntimeImpl->GetProfiler(networkId);
127}
128
130{
131 return pRuntimeImpl->RegisterDebugCallback(networkId, func);
132}
133
134int RuntimeImpl::GenerateNetworkId()
135{
136 return m_NetworkIdCounter++;
137}
138
140{
141 std::string ignoredErrorMessage;
142 return LoadNetwork(networkIdOut, std::move(inNetwork), ignoredErrorMessage);
143}
144
146 IOptimizedNetworkPtr inNetwork,
147 std::string& errorMessage)
148{
150 return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
151}
152
154 IOptimizedNetworkPtr inNetwork,
155 std::string& errorMessage,
156 const INetworkProperties& networkProperties)
157{
158 // Register the profiler
159 auto profiler = inNetwork->GetProfiler();
161
162 IOptimizedNetwork* rawNetwork = inNetwork.release();
163
164 networkIdOut = GenerateNetworkId();
165
166 for (auto&& context : m_BackendContexts)
167 {
168 context.second->BeforeLoadNetwork(networkIdOut);
169 }
170
171 unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
172 std::unique_ptr<IOptimizedNetwork>(rawNetwork),
173 errorMessage,
174 networkProperties,
175 m_ProfilingService.get());
176
177 if (!loadedNetwork)
178 {
179 return Status::Failure;
180 }
181
182 {
183#if !defined(ARMNN_DISABLE_THREADS)
184 std::lock_guard<std::mutex> lockGuard(m_Mutex);
185#endif
186
187 // Stores the network
188 m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
189 }
190
191 for (auto&& context : m_BackendContexts)
192 {
193 context.second->AfterLoadNetwork(networkIdOut);
194 }
195
196 if (m_ProfilingService->IsProfilingEnabled())
197 {
198 m_ProfilingService->IncrementCounterValue(arm::pipe::NETWORK_LOADS);
199 }
200
201 return Status::Success;
202}
203
205{
206 bool unloadOk = true;
207 for (auto&& context : m_BackendContexts)
208 {
209 unloadOk &= context.second->BeforeUnloadNetwork(networkId);
210 }
211
212 if (!unloadOk)
213 {
214 ARMNN_LOG(warning) << "RuntimeImpl::UnloadNetwork(): failed to unload "
215 "network with ID:" << networkId << " because BeforeUnloadNetwork failed";
216 return Status::Failure;
217 }
218
219 std::unique_ptr<arm::pipe::TimelineUtilityMethods> timelineUtils =
220 arm::pipe::TimelineUtilityMethods::GetTimelineUtils(*m_ProfilingService.get());
221 {
222#if !defined(ARMNN_DISABLE_THREADS)
223 std::lock_guard<std::mutex> lockGuard(m_Mutex);
224#endif
225
226 // If timeline recording is on mark the Network end of life
227 if (timelineUtils)
228 {
229 auto search = m_LoadedNetworks.find(networkId);
230 if (search != m_LoadedNetworks.end())
231 {
232 arm::pipe::ProfilingGuid networkGuid = search->second->GetNetworkGuid();
233 timelineUtils->RecordEvent(networkGuid,
234 arm::pipe::LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
235 }
236 }
237
238 if (m_LoadedNetworks.erase(networkId) == 0)
239 {
240 ARMNN_LOG(warning) << "WARNING: RuntimeImpl::UnloadNetwork(): " << networkId << " not found!";
241 return Status::Failure;
242 }
243
244 if (m_ProfilingService->IsProfilingEnabled())
245 {
246 m_ProfilingService->IncrementCounterValue(arm::pipe::NETWORK_UNLOADS);
247 }
248 }
249
250 for (auto&& context : m_BackendContexts)
251 {
252 context.second->AfterUnloadNetwork(networkId);
253 }
254
255 // Unregister the profiler
257
258 ARMNN_LOG(debug) << "RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
259 return Status::Success;
260}
261
262const std::shared_ptr<IProfiler> RuntimeImpl::GetProfiler(NetworkId networkId) const
263{
264 auto it = m_LoadedNetworks.find(networkId);
265 if (it != m_LoadedNetworks.end())
266 {
267 auto& loadedNetwork = it->second;
268 return loadedNetwork->GetProfiler();
269 }
270
271 return nullptr;
272}
273
274void RuntimeImpl::ReportStructure(arm::pipe::IProfilingService& profilingService)
275{
276 if (profilingService.IsProfilingEnabled())
277 {
278 LoadedNetworks::iterator it = m_LoadedNetworks.begin();
279 while (it != m_LoadedNetworks.end())
280 {
281 auto& loadedNetwork = it->second;
282 loadedNetwork->SendNetworkStructure(profilingService);
283 // Increment the Iterator to point to next entry
284 it++;
285 }
286 }
287}
288
289void RuntimeImpl::InitialiseProfilingService(arm::pipe::IProfilingService& profilingService)
290{
292 initialiser.InitialiseProfilingService(profilingService);
293}
294
296 : m_NetworkIdCounter(0)
297{
298 m_ProfilingService = arm::pipe::IProfilingService::CreateProfilingService(
299 arm::pipe::MAX_ARMNN_COUNTER,
300 *this,
301 arm::pipe::ARMNN_SOFTWARE_INFO,
302 arm::pipe::ARMNN_SOFTWARE_VERSION,
303 arm::pipe::ARMNN_HARDWARE_VERSION,
304 *this);
305 const auto start_time = armnn::GetTimeNow();
306 ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION;
308 {
309 throw RuntimeException(
310 "It is not possible to enable timeline reporting without profiling being enabled");
311 }
312#if !defined(ARMNN_DISABLE_DYNAMIC_BACKENDS)
313 // Load any available/compatible dynamic backend before the runtime
314 // goes through the backend registry
315 LoadDynamicBackends(options.m_DynamicBackendsPath);
316#endif
317 armnn::BackendIdSet supportedBackends;
318 for (const auto& id : BackendRegistryInstance().GetBackendIds())
319 {
320 // Store backend contexts for the supported ones
321 try {
322 auto factoryFun = BackendRegistryInstance().GetFactory(id);
323
324 if (!factoryFun)
325 {
326 throw armnn::NullPointerException("Factory Function should not be null.");
327 }
328
329 auto backend = factoryFun();
330
331 auto customAllocatorMapIterator = options.m_CustomAllocatorMap.find(id);
332 if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end() &&
333 customAllocatorMapIterator->second == nullptr)
334 {
335#if !defined(ARMNN_DISABLE_DYNAMIC_BACKENDS)
336 // We need to manually clean up the dynamic backends before throwing an exception.
337 DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
338 m_DeviceSpec.ClearDynamicBackends();
339#endif
340 throw armnn::Exception("Allocator associated with id " + id.Get() + " is null");
341 }
342
343 // If the runtime is created in protected mode only add backends that support this mode
344 if (options.m_ProtectedMode)
345 {
346 // check if backend supports ProtectedMode
348 BackendCapability protectedContentCapability {"ProtectedContentAllocation", true};
349 if (!HasMatchingCapability(protectedContentCapability, id))
350 {
351 // Protected Content Allocation is not supported by the backend
352 // backend should not be registered
353 ARMNN_LOG(warning) << "Backend "
354 << id
355 << " is not registered as does not support protected content allocation.";
356 continue;
357 }
358 // The user is responsible to provide a custom memory allocator which allows to allocate
359 // protected memory
360 if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
361 {
362 std::string err;
363 if (customAllocatorMapIterator->second->GetMemorySourceType()
365 {
366 if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
367 {
368 ARMNN_LOG(error) << "The backend "
369 << id
370 << " reported an error when entering protected mode. Backend won't be"
371 << " used. ErrorMsg: " << err;
372 continue;
373 }
374 // No errors so register the Custom Allocator with the BackendRegistry
375 BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
376 m_AllocatorsAddedByThisRuntime.emplace(id);
377 }
378 else
379 {
380 ARMNN_LOG(error) << "The CustomAllocator provided with the runtime options doesn't support "
381 "protected memory. Protected mode can't be activated. The backend "
382 << id
383 << " is not going to be used. MemorySource must be MemorySource::DmaBufProtected";
384 continue;
385 }
386 }
387 else
388 {
389 ARMNN_LOG(error) << "Protected mode can't be activated for backend: "
390 << id
391 << " no custom allocator was provided to the runtime options.";
392 continue;
393 }
394 }
395 else
396 {
397 // If a custom memory allocator is provided make the backend use that instead of the default
398 if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end())
399 {
400 std::string err;
401 if (!backend->UseCustomMemoryAllocator(customAllocatorMapIterator->second, err))
402 {
403 ARMNN_LOG(error) << "The backend "
404 << id
405 << " reported an error when trying to use the provided custom allocator."
406 " Backend won't be used."
407 << " ErrorMsg: " << err;
408 continue;
409 }
410 // No errors so register the Custom Allocator with the BackendRegistry
411 BackendRegistryInstance().RegisterAllocator(id, customAllocatorMapIterator->second);
412 m_AllocatorsAddedByThisRuntime.emplace(id);
413 }
414 }
415
416 // check if custom memory optimizer strategy map is set
417 if (!options.m_MemoryOptimizerStrategyMap.empty())
418 {
419 auto customMemoryOptimizerStrategyMapIterator = options.m_MemoryOptimizerStrategyMap.find(id);
420 // if a memory optimizer strategy is provided make the backend use that instead of the default
421 if (customMemoryOptimizerStrategyMapIterator != options.m_MemoryOptimizerStrategyMap.end())
422 {
423 // no errors.. register the memory optimizer strategy with the BackendRegistry
425 id, customMemoryOptimizerStrategyMapIterator->second);
426
427 ARMNN_LOG(info) << "MemoryOptimizerStrategy "
428 << customMemoryOptimizerStrategyMapIterator->second->GetName()
429 << " set for the backend " << id << ".";
430 }
431 }
432 else
433 {
434 // check if to use one of the existing memory optimizer strategies is set
435 std::string memoryOptimizerStrategyName = "";
436 ParseOptions(options.m_BackendOptions, id, [&](std::string name, const BackendOptions::Var& value)
437 {
438 if (name == "MemoryOptimizerStrategy")
439 {
440 memoryOptimizerStrategyName = ParseStringBackendOption(value, "");
441 }
442 });
443 if (memoryOptimizerStrategyName != "")
444 {
445 std::shared_ptr<IMemoryOptimizerStrategy> strategy =
446 GetMemoryOptimizerStrategy(memoryOptimizerStrategyName);
447
448 if (!strategy)
449 {
450 ARMNN_LOG(warning) << "MemoryOptimizerStrategy: " << memoryOptimizerStrategyName
451 << " was not found.";
452 }
453 else
454 {
456 auto strategyType = GetMemBlockStrategyTypeName(strategy->GetMemBlockStrategyType());
457 BackendCapability memOptimizeStrategyCapability {strategyType, true};
458 if (HasMatchingCapability(memOptimizeStrategyCapability, id))
459 {
461
462 ARMNN_LOG(info) << "MemoryOptimizerStrategy: "
463 << memoryOptimizerStrategyName << " set for the backend " << id << ".";
464 }
465 else
466 {
467 ARMNN_LOG(warning) << "Backend "
468 << id
469 << " does not have multi-axis packing capability and cannot support"
470 << "MemoryOptimizerStrategy: " << memoryOptimizerStrategyName << ".";
471 }
472 }
473 }
474 }
475
476 auto context = backend->CreateBackendContext(options);
477
478 // backends are allowed to return nullptrs if they
479 // don't wish to create a backend specific context
480 if (context)
481 {
482 m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
483 }
484 supportedBackends.emplace(id);
485
486 unique_ptr<arm::pipe::IBackendProfiling> profilingIface =
487 arm::pipe::IBackendProfiling::CreateBackendProfiling(
489 *m_ProfilingService.get(),
490 id.Get());
491
492 // Backends may also provide a profiling context. Ask for it now.
493 auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface);
494 // Backends that don't support profiling will return a null profiling context.
495 if (profilingContext)
496 {
497 // Pass the context onto the profiling service.
498 m_ProfilingService->AddBackendProfilingContext(id, profilingContext);
499 }
500 }
501 catch (const BackendUnavailableException&)
502 {
503 // Ignore backends which are unavailable
504 }
505 }
506
507 BackendRegistryInstance().SetProfilingService(*m_ProfilingService.get());
508 // pass configuration info to the profiling service
509 m_ProfilingService->ConfigureProfilingService(
510 arm::pipe::ConvertExternalProfilingOptions(options.m_ProfilingOptions));
511 if (options.m_ProfilingOptions.m_EnableProfiling)
512 {
513 // try to wait for the profiling service to initialise
514 m_ProfilingService->WaitForProfilingServiceActivation(3000);
515 }
516
517 m_DeviceSpec.AddSupportedBackends(supportedBackends);
518
519 ARMNN_LOG(info) << "Initialization time: " << std::setprecision(2)
520 << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms.";
521}
522
524{
525 const auto startTime = armnn::GetTimeNow();
526 std::vector<int> networkIDs;
527 try
528 {
529 // Coverity fix: The following code may throw an exception of type std::length_error.
530 std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
531 std::back_inserter(networkIDs),
532 [](const auto &pair) { return pair.first; });
533 }
534 catch (const std::exception& e)
535 {
536 // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
537 // exception of type std::length_error.
538 // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
539 std::cerr << "WARNING: An error has occurred when getting the IDs of the networks to unload: " << e.what()
540 << "\nSome of the loaded networks may not be unloaded" << std::endl;
541 }
542 // We then proceed to unload all the networks which IDs have been appended to the list
543 // up to the point the exception was thrown (if any).
544
545 for (auto networkID : networkIDs)
546 {
547 try
548 {
549 // Coverity fix: UnloadNetwork() may throw an exception of type std::length_error,
550 // boost::log::v2s_mt_posix::odr_violation or boost::log::v2s_mt_posix::system_error
551 UnloadNetwork(networkID);
552 }
553 catch (const std::exception& e)
554 {
555 // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
556 // exception of type std::length_error.
557 // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
558 std::cerr << "WARNING: An error has occurred when unloading network " << networkID << ": " << e.what()
559 << std::endl;
560 }
561 }
562#if !defined(ARMNN_DISABLE_DYNAMIC_BACKENDS)
563 // Clear all dynamic backends.
564 DynamicBackendUtils::DeregisterDynamicBackends(m_DeviceSpec.GetDynamicBackends());
565 m_DeviceSpec.ClearDynamicBackends();
566#endif
567 m_BackendContexts.clear();
568
570 // Remove custom allocators that this runtime has added.
571 // Note: that as backends can be per process and there can be many instances of a runtime in a process an allocator
572 // may have been overwritten by another runtime.
573 for_each(m_AllocatorsAddedByThisRuntime.begin(), m_AllocatorsAddedByThisRuntime.end(),
574 [](BackendId id) {BackendRegistryInstance().DeregisterAllocator(id);});
575
576 ARMNN_LOG(info) << "Shutdown time: " << std::setprecision(2)
577 << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms.";
578}
579
580LoadedNetwork* RuntimeImpl::GetLoadedNetworkPtr(NetworkId networkId) const
581{
582#if !defined(ARMNN_DISABLE_THREADS)
583 std::lock_guard<std::mutex> lockGuard(m_Mutex);
584#endif
585 return m_LoadedNetworks.at(networkId).get();
586}
587
589{
590 return GetLoadedNetworkPtr(networkId)->GetInputTensorInfo(layerId);
591}
592
594{
595 return GetLoadedNetworkPtr(networkId)->GetOutputTensorInfo(layerId);
596}
597
598std::vector<ImportedInputId> RuntimeImpl::ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
599 MemorySource forceImportMemorySource)
600{
601 return GetLoadedNetworkPtr(networkId)->ImportInputs(inputTensors, forceImportMemorySource);
602}
603
604std::vector<ImportedOutputId> RuntimeImpl::ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
605 MemorySource forceImportMemorySource)
606{
607 return GetLoadedNetworkPtr(networkId)->ImportOutputs(outputTensors, forceImportMemorySource);
608}
609
610void RuntimeImpl::ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds)
611{
612 return GetLoadedNetworkPtr(networkId)->ClearImportedInputs(inputIds);
613}
614void RuntimeImpl::ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds)
615{
616 return GetLoadedNetworkPtr(networkId)->ClearImportedOutputs(outputIds);
617}
618
620 const InputTensors& inputTensors,
621 const OutputTensors& outputTensors,
622 std::vector<ImportedInputId> preImportedInputIds,
623 std::vector<ImportedOutputId> preImportedOutputIds)
624{
625 const auto startTime = armnn::GetTimeNow();
626
627 LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
628
629 if (!loadedNetwork)
630 {
631 ARMNN_LOG(error) << "A Network with an id of " << networkId << " does not exist.";
632 return Status::Failure;
633 }
635
637
638 static thread_local NetworkId lastId = networkId;
639 if (lastId != networkId)
640 {
641 LoadedNetworkFuncSafe(lastId, [](LoadedNetwork* network)
642 {
643 network->FreeWorkingMemory();
644 });
645 }
646 lastId=networkId;
647
648 auto status = loadedNetwork->EnqueueWorkload(inputTensors, outputTensors,
649 preImportedInputIds, preImportedOutputIds);
650
651 // Check if we imported, if not there's no need to call the After EnqueueWorkload events
652 if (!preImportedInputIds.empty() || !preImportedOutputIds.empty())
653 {
654 // Call After EnqueueWorkload events
655 for (auto&& context : m_BackendContexts)
656 {
657 context.second->AfterEnqueueWorkload(networkId);
658 }
659 }
660 ARMNN_LOG(info) << "Execution time: " << std::setprecision(2)
661 << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms.";
662 return status;
663}
664
666{
667 LoadedNetwork* loadedNetwork = GetLoadedNetworkPtr(networkId);
668 loadedNetwork->RegisterDebugCallback(func);
669}
670
671#if !defined(ARMNN_DISABLE_DYNAMIC_BACKENDS)
672void RuntimeImpl::LoadDynamicBackends(const std::string& overrideBackendPath)
673{
674 // Get the paths where to load the dynamic backends from
675 std::vector<std::string> backendPaths = DynamicBackendUtils::GetBackendPaths(overrideBackendPath);
676
677 // Get the shared objects to try to load as dynamic backends
678 std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);
679
680 // Create a list of dynamic backends
681 m_DynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
682
683 // Register the dynamic backends in the backend registry
684 armnn::BackendIdSet registeredBackendIds = DynamicBackendUtils::RegisterDynamicBackends(m_DynamicBackends);
685
686 // Add the registered dynamic backend ids to the list of supported backends
687 m_DeviceSpec.AddSupportedBackends(registeredBackendIds, true);
688}
689#endif
690} // namespace armnn
#define ARMNN_LOG(severity)
Definition Logging.hpp:212
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
void InitialiseProfilingService(arm::pipe::IProfilingService &profilingService) override
Very basic type safe variant.
void SetProfilingService(armnn::Optional< arm::pipe::IProfilingService & > profilingService)
void RegisterAllocator(const BackendId &id, std::shared_ptr< ICustomAllocator > alloc)
void RegisterMemoryOptimizerStrategy(const BackendId &id, std::shared_ptr< IMemoryOptimizerStrategy > strategy)
FactoryFunction GetFactory(const BackendId &id) const
Class for non-fatal exceptions raised while initialising a backend.
void AddSupportedBackends(const BackendIdSet &backendIds, bool isDynamic=false)
static std::vector< DynamicBackendPtr > CreateDynamicBackends(const std::vector< std::string > &sharedObjects)
static std::vector< std::string > GetSharedObjects(const std::vector< std::string > &backendPaths)
static BackendIdSet RegisterDynamicBackends(const std::vector< DynamicBackendPtr > &dynamicBackends)
static std::vector< std::string > GetBackendPaths(const std::string &overrideBackendPath="")
static void DeregisterDynamicBackends(const BackendIdSet &dynamicBackends)
Base class for all ArmNN exceptions so that users can filter to just those.
Device specific knowledge to be passed to the optimizer.
Definition Types.hpp:302
const IDeviceSpec & GetDeviceSpec() const
Definition Runtime.cpp:119
TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition Runtime.cpp:82
static IRuntime * CreateRaw(const CreationOptions &options)
Definition Runtime.cpp:47
TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition Runtime.cpp:87
std::vector< ImportedOutputId > ImportOutputs(NetworkId networkId, const OutputTensors &outputTensors, MemorySource forceImportMemorySource=MemorySource::Undefined)
ImportOutputs separates the importing and mapping of OutputTensors from network execution.
Definition Runtime.cpp:98
std::unique_ptr< RuntimeImpl > pRuntimeImpl
Definition IRuntime.hpp:263
void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction &func)
Registers a callback function to debug layers performing custom computations on intermediate tensors.
Definition Runtime.cpp:129
static void Destroy(IRuntime *runtime)
Definition Runtime.cpp:57
std::vector< ImportedInputId > ImportInputs(NetworkId networkId, const InputTensors &inputTensors, MemorySource forceImportMemorySource=MemorySource::Undefined)
ImportInputs separates the importing and mapping of InputTensors from network execution.
Definition Runtime.cpp:92
Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors, std::vector< ImportedInputId > preImportedInputIds={}, std::vector< ImportedOutputId > preImportedOutputIds={})
Evaluates a network using input in inputTensors and outputs filled into outputTensors.
Definition Runtime.cpp:104
static IRuntimePtr Create(const CreationOptions &options)
Definition Runtime.cpp:52
Status UnloadNetwork(NetworkId networkId)
Unloads a network from the IRuntime.
Definition Runtime.cpp:114
const std::shared_ptr< IProfiler > GetProfiler(NetworkId networkId) const
Gets the profiler corresponding to the given network id.
Definition Runtime.cpp:124
Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network)
Loads a complete network into the IRuntime.
Definition Runtime.cpp:62
void RegisterDebugCallback(const DebugCallbackFunction &func)
Status EnqueueWorkload(const InputTensors &inputTensors, const OutputTensors &outputTensors, std::vector< ImportedInputId > preImportedInputIds={}, std::vector< ImportedOutputId > preImportedOutputIds={})
Single thread execution of the loaded network.
const std::shared_ptr< IProfiler > & GetProfiler() const
static std::unique_ptr< LoadedNetwork > MakeLoadedNetwork(std::unique_ptr< IOptimizedNetwork > net, std::string &errorMessage, const INetworkProperties &networkProperties, arm::pipe::IProfilingService *profilingService)
void RegisterProfiler(IProfiler *profiler)
static ProfilerManager & GetInstance()
#define ARMNN_VERSION
ARMNN_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch version num...
Definition Version.hpp:22
ProfilingOptions ConvertExternalProfilingOptions(const armnn::IRuntime::CreationOptions::ExternalProfilingOptions &options)
Copyright (c) 2021 ARM Limited and Contributors.
MemorySource
Define the Memory Source to reduce copies.
Definition Types.hpp:246
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition IRuntime.hpp:39
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
Definition Types.hpp:400
std::unordered_set< BackendId > BackendIdSet
bool HasMatchingCapability(const BackendOptions::BackendOption &capability, const BackendCapabilities &capabilities)
Convenience function to check if a given capability matches a capability in a BackendCapabilities str...
std::unique_ptr< IMemoryOptimizerStrategy > GetMemoryOptimizerStrategy(const std::string &strategyName)
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition INetwork.hpp:340
Status
enumeration
Definition Types.hpp:43
std::chrono::high_resolution_clock::time_point GetTimeNow()
Definition Timer.hpp:14
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition Tensor.hpp:394
int NetworkId
Definition IRuntime.hpp:33
constexpr const char * GetMemBlockStrategyTypeName(MemBlockStrategyType memBlockStrategyType)
BackendCapability
BackendCapability class.
Definition Types.hpp:288
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition Types.hpp:311
std::chrono::duration< double, std::milli > GetTimeDuration(std::chrono::high_resolution_clock::time_point start_time)
Definition Timer.hpp:19
BackendRegistry & BackendRegistryInstance()
void ParseOptions(const std::vector< BackendOptions > &options, BackendId backend, F f)
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition Tensor.hpp:395
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition Optional.hpp:32
bool m_EnableProfiling
Indicates whether external profiling is enabled or not.
Definition IRuntime.hpp:131
bool m_TimelineEnabled
Indicates whether external timeline profiling is enabled or not.
Definition IRuntime.hpp:133
ExternalProfilingOptions m_ProfilingOptions
Definition IRuntime.hpp:146
std::map< BackendId, std::shared_ptr< IMemoryOptimizerStrategy > > m_MemoryOptimizerStrategyMap
A map to define a custom memory optimizer strategy for specific backend Ids.
Definition IRuntime.hpp:115
std::map< BackendId, std::shared_ptr< ICustomAllocator > > m_CustomAllocatorMap
A map to define a custom memory allocator for specific backend Ids.
Definition IRuntime.hpp:107
bool m_ProtectedMode
Setting this flag will allow the user to create the Runtime in protected mode.
Definition IRuntime.hpp:98
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Definition IRuntime.hpp:91
std::vector< BackendOptions > m_BackendOptions
Pass backend specific options.
Definition IRuntime.hpp:182
RuntimeImpl(const IRuntime::CreationOptions &options)
Creates a runtime for workload execution.
Definition Runtime.cpp:295
void ReportStructure(arm::pipe::IProfilingService &profilingService) override
Definition Runtime.cpp:274
void ClearImportedOutputs(NetworkId networkId, const std::vector< ImportedOutputId > outputIds)
Definition Runtime.cpp:614
std::vector< ImportedOutputId > ImportOutputs(NetworkId networkId, const OutputTensors &outputTensors, MemorySource forceImportMemorySource)
Definition Runtime.cpp:604
armnn::TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition Runtime.cpp:593
void RegisterDebugCallback(NetworkId networkId, const DebugCallbackFunction &func)
Registers a callback function to debug layers performing custom computations on intermediate tensors.
Definition Runtime.cpp:665
std::vector< ImportedInputId > ImportInputs(NetworkId networkId, const InputTensors &inputTensors, MemorySource forceImportMemorySource)
Definition Runtime.cpp:598
armnn::TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const
Definition Runtime.cpp:588
void ClearImportedInputs(NetworkId networkId, const std::vector< ImportedInputId > inputIds)
Definition Runtime.cpp:610
Status EnqueueWorkload(NetworkId networkId, const InputTensors &inputTensors, const OutputTensors &outputTensors, std::vector< ImportedInputId > preImportedInputIds={}, std::vector< ImportedOutputId > preImportedOutputIds={})
Definition Runtime.cpp:619
void InitialiseProfilingService(arm::pipe::IProfilingService &profilingService) override
Definition Runtime.cpp:289
Status UnloadNetwork(NetworkId networkId)
Unloads a network from the Runtime.
Definition Runtime.cpp:204
const std::shared_ptr< IProfiler > GetProfiler(NetworkId networkId) const
Gets the profiler corresponding to the given network id.
Definition Runtime.cpp:262
Status LoadNetwork(NetworkId &networkIdOut, IOptimizedNetworkPtr network)
Loads a complete network into the Runtime.
Definition Runtime.cpp:139