ArmNN 24.08
ArmnnPreparedModel Class Reference (final)

#include <ArmnnPreparedModel.hpp>


Public Member Functions

 ArmnnPreparedModel (armnn::NetworkId networkId, armnn::IRuntime *runtime, const Model &model, const std::string &requestInputsAndOutputsDumpDir, const bool gpuProfilingEnabled, Priority priority=Priority::MEDIUM)
 
 ArmnnPreparedModel (armnn::NetworkId networkId, armnn::IRuntime *runtime, const std::string &requestInputsAndOutputsDumpDir, const bool gpuProfilingEnabled, Priority priority=Priority::MEDIUM, const bool prepareModelFromCache=false)
 
virtual ~ArmnnPreparedModel ()
 
ExecutionResult< std::pair< std::vector< OutputShape >, Timing > > execute (const Request &request, MeasureTiming measureTiming, const OptionalTimePoint &deadline, const OptionalDuration &loopTimeoutDuration, const std::vector< android::nn::TokenValuePair > &hints, const std::vector< android::nn::ExtensionNameAndPrefix > &extensionNameToPrefix) const override
 
GeneralResult< std::pair< SyncFence, ExecuteFencedInfoCallback > > executeFenced (const Request &request, const std::vector< SyncFence > &waitFor, MeasureTiming measureTiming, const OptionalTimePoint &deadline, const OptionalDuration &loopTimeoutDuration, const OptionalDuration &timeoutDurationAfterFence, const std::vector< android::nn::TokenValuePair > &hints, const std::vector< android::nn::ExtensionNameAndPrefix > &extensionNameToPrefix) const override
 
GeneralResult< android::nn::SharedExecution > createReusableExecution (const Request &request, MeasureTiming measureTiming, const OptionalDuration &loopTimeoutDuration, const std::vector< android::nn::TokenValuePair > &hints, const std::vector< android::nn::ExtensionNameAndPrefix > &extensionNameToPrefix) const override
 
GeneralResult< SharedBurst > configureExecutionBurst () const override
 
std::any getUnderlyingResource () const override
 
ErrorStatus ExecuteGraph (std::shared_ptr< std::vector< android::nn::RunTimePoolInfo >> &pMemPools, armnn::InputTensors &inputTensors, armnn::OutputTensors &outputTensors, CanonicalExecutionContext callback, const bool pointerMemory=false) const
 Execute the graph prepared from the request.
 
Priority GetModelPriority () const
 
bool ExecuteWithDummyInputs (unsigned int numInputs, unsigned int numOutputs) const
 Executes this model with dummy inputs (e.g. all zeroes).
 

Detailed Description

Definition at line 38 of file ArmnnPreparedModel.hpp.

Constructor & Destructor Documentation

◆ ArmnnPreparedModel() [1/2]

ArmnnPreparedModel (armnn::NetworkId networkId,
                    armnn::IRuntime *runtime,
                    const Model &model,
                    const std::string &requestInputsAndOutputsDumpDir,
                    const bool gpuProfilingEnabled,
                    Priority priority = Priority::MEDIUM)

Definition at line 126 of file ArmnnPreparedModel.cpp.

    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_Model(model)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
    , m_ModelPriority(priority)
    , m_PrepareFromCache(false)
{
    Init();
}

◆ ArmnnPreparedModel() [2/2]

ArmnnPreparedModel (armnn::NetworkId networkId,
                    armnn::IRuntime *runtime,
                    const std::string &requestInputsAndOutputsDumpDir,
                    const bool gpuProfilingEnabled,
                    Priority priority = Priority::MEDIUM,
                    const bool prepareModelFromCache = false)

Definition at line 143 of file ArmnnPreparedModel.cpp.

    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
    , m_ModelPriority(priority)
    , m_PrepareFromCache(prepareModelFromCache)
{
    Init();
}

◆ ~ArmnnPreparedModel()

~ArmnnPreparedModel ( )
virtual

Definition at line 626 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "ArmnnPreparedModel::~ArmnnPreparedModel()";
    // Get a hold of the profiler used by this model.
    if (m_GpuProfilingEnabled)
    {
        auto profiler = m_Runtime->GetProfiler(m_NetworkId);
        if (profiler)
        {
            // Dump the profiling info to a file if required.
            DumpJsonProfilingIfRequired(m_GpuProfilingEnabled,
                                        m_RequestInputsAndOutputsDumpDir,
                                        m_NetworkId,
                                        profiler.get());
        }
    }
    // Unload the network associated with this model.
    m_Runtime->UnloadNetwork(m_NetworkId);
}

References armnn_driver::DumpJsonProfilingIfRequired(), IRuntime::GetProfiler(), and IRuntime::UnloadNetwork().

Member Function Documentation

◆ configureExecutionBurst()

GeneralResult< SharedBurst > configureExecutionBurst ( ) const
override

Definition at line 599 of file ArmnnPreparedModel.cpp.

{
    // This driver does not implement a burst object; nullptr is returned instead.
    return nullptr;
}

◆ createReusableExecution()

GeneralResult< SharedExecution > createReusableExecution ( const Request &  request,
MeasureTiming  measureTiming,
const OptionalDuration &  loopTimeoutDuration,
const std::vector< android::nn::TokenValuePair > &  hints,
const std::vector< android::nn::ExtensionNameAndPrefix > &  extensionNameToPrefix 
) const
override

Definition at line 585 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "ArmnnPreparedModel::createReusableExecution()";
    return std::make_shared<DefaultExecution>(shared_from_this(),
                                              request,
                                              measureTiming,
                                              loopTimeoutDuration);
}
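
A hedged usage sketch: it assumes the canonical android::nn::IExecution interface exposes a compute() entry point, and that request is a previously validated android::nn::Request.

auto created = preparedModel->createReusableExecution(
    request, MeasureTiming::NO, /*loopTimeoutDuration=*/{}, /*hints=*/{}, /*extensionNameToPrefix=*/{});
if (created.has_value())
{
    android::nn::SharedExecution execution = std::move(created).value();
    // The same execution object can be computed repeatedly without
    // re-validating the request each time.
    auto outcome = execution->compute(/*deadline=*/{});
}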

◆ execute()

ExecutionResult< std::pair< std::vector< OutputShape >, Timing > > execute ( const Request &  request,
MeasureTiming  measureTiming,
const OptionalTimePoint &  deadline,
const OptionalDuration &  loopTimeoutDuration,
const std::vector< android::nn::TokenValuePair > &  hints,
const std::vector< android::nn::ExtensionNameAndPrefix > &  extensionNameToPrefix 
) const
override

Definition at line 294 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "CanonicalDriver::PreparedModel::execute()";

    CanonicalExecutionContext ctx;
    if (measureTiming == MeasureTiming::YES)
    {
        ctx.measureTimings = measureTiming;
        ctx.driverStart = Clock::now();
    }

    if (!m_PrepareFromCache)
    {
        const auto modelRequest = validateRequestForModel(request, m_Model);
        if (!modelRequest.ok())
        {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << modelRequest.error();
        }
        VLOG(DRIVER) << "ArmnnPreparedModel::execute(): " << GetModelSummary(m_Model).c_str();
    }
    if (hasDeadlinePassed(deadline))
    {
        return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
    }

    // Map the memory pools into shared pointers.
    // Use a shared memory pools vector on the heap, as it is passed to the request thread.
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // Allocate the tensors on the heap, as they are passed to the request thread.
    auto inputTensors = std::make_shared<armnn::InputTensors>();
    auto outputTensors = std::make_shared<armnn::OutputTensors>();

    auto isPointerTypeMemory = IsPointerTypeMemory(request);
    ErrorStatus theErrorStatus = PrepareMemoryForIO(*inputTensors,
                                                    *outputTensors,
                                                    *memPools,
                                                    request,
                                                    isPointerTypeMemory);

    switch (theErrorStatus)
    {
        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
            return NN_ERROR(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE);
        case ErrorStatus::GENERAL_FAILURE:
            return NN_ERROR(ErrorStatus::GENERAL_FAILURE);
        case ErrorStatus::INVALID_ARGUMENT:
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT);
        default:
        {}
    }

    std::vector<OutputShape> outputShapes(outputTensors->size());
    for (unsigned int i = 0; i < outputTensors->size(); i++)
    {
        std::pair<int, armnn::Tensor> outputTensorPair = (*outputTensors)[i];
        const armnn::Tensor outputTensor = outputTensorPair.second;
        const armnn::TensorInfo outputTensorInfo = outputTensor.GetInfo();

        outputShapes[i] = ComputeShape(outputTensorInfo);
    }
    Timing theTiming;

    VLOG(DRIVER) << "ArmnnPreparedModel::execute(...) before ExecuteGraph";
    auto errorStatus = ExecuteGraph(memPools, *inputTensors, *outputTensors, ctx, isPointerTypeMemory);
    if (errorStatus != ErrorStatus::NONE)
    {
        return NN_ERROR(errorStatus) << "execute() failed";
    }
    VLOG(DRIVER) << "ArmnnPreparedModel::execute(...) after ExecuteGraph";

    return std::make_pair(outputShapes, theTiming);
}

References armnn_driver::ComputeShape(), CanonicalExecutionContext::driverStart, ArmnnPreparedModel::ExecuteGraph(), BaseTensor< MemoryType >::GetInfo(), armnn_driver::GetModelSummary(), and CanonicalExecutionContext::measureTimings.
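
For illustration, a minimal caller-side sketch; request is assumed to be a valid android::nn::Request whose memory pools the caller has already set up.

auto result = preparedModel->execute(request,
                                     MeasureTiming::YES,
                                     /*deadline=*/{},
                                     /*loopTimeoutDuration=*/{},
                                     /*hints=*/{},
                                     /*extensionNameToPrefix=*/{});
if (result.has_value())
{
    const auto& [outputShapes, timing] = result.value();
    // outputShapes[i] mirrors ComputeShape() of the i-th output tensor.
}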

◆ executeFenced()

GeneralResult< std::pair< SyncFence, ExecuteFencedInfoCallback > > executeFenced ( const Request &  request,
const std::vector< SyncFence > &  waitFor,
MeasureTiming  measureTiming,
const OptionalTimePoint &  deadline,
const OptionalDuration &  loopTimeoutDuration,
const OptionalDuration &  timeoutDurationAfterFence,
const std::vector< android::nn::TokenValuePair > &  hints,
const std::vector< android::nn::ExtensionNameAndPrefix > &  extensionNameToPrefix 
) const
override

Definition at line 477 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "ArmnnPreparedModel::executeFenced()";

    if (!m_PrepareFromCache)
    {
        const auto modelRequest = validateRequestForModel(request, m_Model);
        if (!modelRequest.ok())
        {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << modelRequest.error();
        }
        VLOG(DRIVER) << "ArmnnPreparedModel::executeFenced(): " << GetModelSummary(m_Model).c_str();
    }
    if (hasDeadlinePassed(deadline))
    {
        return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
    }

    CanonicalExecutionContext ctx;
    if (measureTiming == MeasureTiming::YES)
    {
        ctx.measureTimings = measureTiming;
        ctx.driverStart = Clock::now();
    }

    // Wait for the dependent events to signal.
    for (const auto& syncFence : waitFor)
    {
        if (!syncFence.getSharedHandle())
        {
            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT);
        }
        if (syncFence.syncWait({}) != SyncFence::FenceState::SIGNALED)
        {
            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "syncWait failed";
        }
    }

    android::nn::TimePoint fenceExecutionStart;
    if (measureTiming == MeasureTiming::YES)
    {
        fenceExecutionStart = Clock::now();
    }

    // Map the memory pools into shared pointers.
    // Use a shared memory pools vector on the heap, as it is passed to the request thread.
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // Allocate the tensors on the heap, as they are passed to the request thread.
    auto inputTensors = std::make_shared<armnn::InputTensors>();
    auto outputTensors = std::make_shared<armnn::OutputTensors>();

    auto isPointerTypeMemory = IsPointerTypeMemory(request);
    ErrorStatus theErrorStatus = PrepareMemoryForIO(*inputTensors,
                                                    *outputTensors,
                                                    *memPools,
                                                    request,
                                                    isPointerTypeMemory);

    if (theErrorStatus != ErrorStatus::NONE)
    {
        return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "executeFenced() failed";
    }

    Timing timingSinceLaunch = {};
    Timing timingAfterFence  = {};
    if (measureTiming == MeasureTiming::YES)
    {
        timingAfterFence.timeOnDevice = ctx.deviceEnd - ctx.deviceStart;
        timingAfterFence.timeInDriver = ctx.driverEnd - fenceExecutionStart;
        VLOG(DRIVER) << "executeFenced timingAfterFence.timeOnDevice = " << timingAfterFence.timeOnDevice;
        VLOG(DRIVER) << "executeFenced timingAfterFence.timeInDriver = " << timingAfterFence.timeInDriver;
    }

    VLOG(DRIVER) << "ArmnnCanonicalPreparedModel::executeFenced(...) before ExecuteGraph";
    auto errorStatus = ExecuteGraph(memPools, *inputTensors, *outputTensors, ctx, isPointerTypeMemory);
    VLOG(DRIVER) << "ArmnnCanonicalPreparedModel::executeFenced(...) after ExecuteGraph";

    ExecuteFencedInfoCallback armnnFencedExecutionCallback =
        [timingSinceLaunch, timingAfterFence, errorStatus]() {

            GeneralResult<std::pair<Timing, Timing>> result;

            switch (errorStatus)
            {
                case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
                    result.error().code = ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
                    break;
                case ErrorStatus::GENERAL_FAILURE:
                    result.error().code = ErrorStatus::GENERAL_FAILURE;
                    break;
                case ErrorStatus::INVALID_ARGUMENT:
                    result.error().code = ErrorStatus::INVALID_ARGUMENT;
                    break;
                default:
                {
                    result.value() = std::make_pair(timingSinceLaunch, timingAfterFence);
                }
            }
            return result;
        };
    return std::make_pair(SyncFence::createAsSignaled(), std::move(armnnFencedExecutionCallback));
}

References CanonicalExecutionContext::deviceEnd, CanonicalExecutionContext::deviceStart, CanonicalExecutionContext::driverEnd, CanonicalExecutionContext::driverStart, ArmnnPreparedModel::ExecuteGraph(), armnn_driver::GetModelSummary(), and CanonicalExecutionContext::measureTimings.
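
Because the implementation waits on waitFor synchronously and returns an already-signaled fence, a caller can invoke the returned callback immediately. A sketch, assuming request is a valid android::nn::Request:

auto fenced = preparedModel->executeFenced(request, /*waitFor=*/{},
                                           MeasureTiming::NO,
                                           /*deadline=*/{},
                                           /*loopTimeoutDuration=*/{},
                                           /*timeoutDurationAfterFence=*/{},
                                           /*hints=*/{},
                                           /*extensionNameToPrefix=*/{});
if (fenced.has_value())
{
    auto [syncFence, infoCallback] = std::move(fenced).value();
    // syncFence is already signaled; infoCallback() yields the timing pair, or
    // the error status recorded by ExecuteGraph().
    auto timings = infoCallback();
}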

◆ ExecuteGraph()

ErrorStatus ExecuteGraph (std::shared_ptr< std::vector< android::nn::RunTimePoolInfo >> &pMemPools,
                          armnn::InputTensors &inputTensors,
                          armnn::OutputTensors &outputTensors,
                          CanonicalExecutionContext callback,
                          const bool pointerMemory = false) const

Execute the graph prepared from the request.

Definition at line 374 of file ArmnnPreparedModel.cpp.

{
    VLOG(DRIVER) << "ArmnnPreparedModel::ExecuteGraph(...)";

    DumpTensorsIfRequired("Input", inputTensors);
    std::vector<armnn::ImportedInputId> importedInputIds;
    std::vector<armnn::ImportedOutputId> importedOutputIds;
    try
    {
        if (ctx.measureTimings == MeasureTiming::YES)
        {
            ctx.deviceStart = Clock::now();
        }
        armnn::Status status;
        VLOG(DRIVER) << "ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false";
        importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
        if (!importedInputIds.empty())
        {
            // Some or all of the input tensors have been imported. Remove the
            // imported ones from inputTensors.
            for (armnn::ImportedInputId& importedId : importedInputIds)
            {
                inputTensors.erase(
                    std::remove_if(
                        inputTensors.begin(), inputTensors.end(),
                        [&importedId](std::pair<armnn::LayerBindingId, class armnn::ConstTensor>& element) {
                            return (element.first == static_cast<int>(importedId));
                        }),
                    inputTensors.end());
            }
        }
        importedOutputIds = m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
        if (!importedOutputIds.empty())
        {
            // Some or all of the output tensors have been imported. Remove the
            // imported ones from outputTensors.
            for (armnn::ImportedOutputId& importedId : importedOutputIds)
            {
                outputTensors.erase(
                    std::remove_if(
                        outputTensors.begin(), outputTensors.end(),
                        [&importedId](std::pair<armnn::LayerBindingId, class armnn::Tensor>& element) {
                            return (element.first == static_cast<int>(importedId));
                        }),
                    outputTensors.end());
            }
        }
        status = m_Runtime->EnqueueWorkload(m_NetworkId,
                                            inputTensors,
                                            outputTensors,
                                            importedInputIds,
                                            importedOutputIds);

        if (ctx.measureTimings == MeasureTiming::YES)
        {
            ctx.deviceEnd = Clock::now();
        }
        if (status != armnn::Status::Success)
        {
            VLOG(DRIVER) << "ArmnnPreparedModel::ExecuteGraph EnqueueWorkload failed";
            return ErrorStatus::GENERAL_FAILURE;
        }
    }
    catch (armnn::Exception& e)
    {
        VLOG(DRIVER) << "armnn::Exception caught from EnqueueWorkload: " << e.what();
        return ErrorStatus::GENERAL_FAILURE;
    }
    catch (std::exception& e)
    {
        VLOG(DRIVER) << "std::exception caught from EnqueueWorkload: " << e.what();
        return ErrorStatus::GENERAL_FAILURE;
    }

    if (!pointerMemory && (!importedInputIds.empty() || !importedOutputIds.empty()))
    {
        CommitPools(*pMemPools);
    }
    DumpTensorsIfRequired("Output", outputTensors);

    if (ctx.measureTimings == MeasureTiming::YES)
    {
        ctx.driverEnd = Clock::now();
        Timing timing;
        timing.timeOnDevice = ctx.deviceEnd - ctx.deviceStart;
        timing.timeInDriver = ctx.driverEnd - ctx.driverStart;
        VLOG(DRIVER) << "ArmnnPreparedModel::execute timing - Device = "
                     << timing.timeOnDevice << " Driver = " << timing.timeInDriver;
    }
    return ErrorStatus::NONE;
}

References armnn_driver::CommitPools(), CanonicalExecutionContext::deviceEnd, CanonicalExecutionContext::deviceStart, CanonicalExecutionContext::driverEnd, CanonicalExecutionContext::driverStart, IRuntime::EnqueueWorkload(), IRuntime::ImportInputs(), IRuntime::ImportOutputs(), armnn::Malloc, CanonicalExecutionContext::measureTimings, armnn::Success, and Exception::what().

Referenced by ArmnnPreparedModel::execute(), ArmnnPreparedModel::executeFenced(), and ArmnnPreparedModel::ExecuteWithDummyInputs().
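
The tensor-removal loops above use the standard erase/remove_if idiom keyed on the binding id. A self-contained illustration, with the pair's payload type simplified to a string for brevity:

#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

int main()
{
    using BindingId = int;
    std::vector<std::pair<BindingId, const char*>> tensors = {{0, "a"}, {1, "b"}, {2, "c"}};

    // Pretend binding 1 was successfully imported and must not be enqueued again.
    BindingId imported = 1;
    tensors.erase(std::remove_if(tensors.begin(), tensors.end(),
                                 [imported](const std::pair<BindingId, const char*>& element)
                                 { return element.first == imported; }),
                  tensors.end());

    // Bindings 0 and 2 remain, i.e. those still passed to EnqueueWorkload()
    // as regular (non-imported) tensors.
    assert(tensors.size() == 2);
}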

◆ ExecuteWithDummyInputs()

bool ExecuteWithDummyInputs ( unsigned int  numInputs,
unsigned int  numOutputs 
) const

Executes this model with dummy inputs (e.g. all zeroes).

Returns
    false on failure, otherwise true.

Definition at line 646 of file ArmnnPreparedModel.cpp.

{
    std::vector<std::vector<char>> storage;
    armnn::InputTensors inputTensors;
    for (unsigned int i = 0; i < numInputs; i++)
    {
        armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        // inputTensors (of type InputTensors) is composed of a vector of ConstTensors,
        // so set the isConstant parameter of each input TensorInfo to true.
        inputTensorInfo.SetConstant();
        storage.emplace_back(inputTensorInfo.GetNumBytes());
        const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());

        inputTensors.emplace_back(i, inputTensor);
    }

    armnn::OutputTensors outputTensors;
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        storage.emplace_back(outputTensorInfo.GetNumBytes());
        const armnn::Tensor outputTensor(outputTensorInfo, storage.back().data());

        outputTensors.emplace_back(i, outputTensor);
    }
    CanonicalExecutionContext ctx;
    ctx.measureTimings = MeasureTiming::NO;
    auto memPools = std::make_shared<std::vector<::android::nn::RunTimePoolInfo>>();

    auto errorStatus = ExecuteGraph(memPools,
                                    inputTensors,
                                    outputTensors,
                                    ctx);

    return errorStatus == ErrorStatus::NONE;
}

References ArmnnPreparedModel::ExecuteGraph(), IRuntime::GetInputTensorInfo(), TensorInfo::GetNumBytes(), IRuntime::GetOutputTensorInfo(), CanonicalExecutionContext::measureTimings, and TensorInfo::SetConstant().
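
A hedged sketch of how a driver might use this as a sanity check after preparing a model from cache, where no Model object is retained for request validation; the input/output counts here are illustrative:

if (!preparedModel->ExecuteWithDummyInputs(/*numInputs=*/1, /*numOutputs=*/1))
{
    VLOG(DRIVER) << "warm-up execution with dummy inputs failed";
}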

◆ GetModelPriority()

Priority GetModelPriority ( ) const

Definition at line 471 of file ArmnnPreparedModel.cpp.

{
    return m_ModelPriority;
}

◆ getUnderlyingResource()

std::any getUnderlyingResource ( ) const
override

Definition at line 604 of file ArmnnPreparedModel.cpp.

{
    return &m_Model;
}
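
Since the returned std::any wraps a pointer to the stored canonical Model, a caller that knows the concrete type can recover it with std::any_cast. A minimal sketch:

#include <any>

std::any resource = preparedModel->getUnderlyingResource();
if (const Model** underlying = std::any_cast<const Model*>(&resource))
{
    const Model& model = **underlying;
    // e.g. inspect the operands and operations of the prepared model
}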

The documentation for this class was generated from the following files:
ArmnnPreparedModel.hpp
ArmnnPreparedModel.cpp