ArmNN 25.11
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers().
class  LayerInGraph< ConstantLayer >
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph.
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph.
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers().

Public Types

using LayerList = std::list<Layer*>
using Iterator = LayerList::const_iterator
using IteratorDifference = Iterator::difference_type
using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>
using ConstIteratorInputs = TransformIterator<decltype(&PtrCast<const InputLayer>), Iterator>
using ConstIteratorOutputs = TransformIterator<decltype(&PtrCast<const OutputLayer>), Iterator>

Public Member Functions

template<typename Func>
void ForEachLayer (Func func) const
 Graph (bool shapeInferenceMethod=false, bool allowExpandedDims=false)
 Graph (const Graph &other)
Graph & operator= (const Graph &other)=delete
 Graph (Graph &&other)
Graph & operator= (Graph &&other)
 ~Graph ()
Status Print (bool extended=false) const
Status SerializeToDot (std::ostream &stream)
template<typename LayerT, typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer of type LayerT, constructed with the arguments passed, to the graph.
template<typename LayerT, typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.
template<typename LayerT, typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it.
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position.
template<typename LayerT>
void EraseLayer (LayerT *&layer)
 Deletes the layer.
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.
Graph & TopologicalSort ()
 Sorts layers in topological order and returns this.
const Graph & TopologicalSort () const
size_t GetNumInputs () const
size_t GetNumOutputs () const
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.
size_t GetNumLayers () const
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under output tensor handlers of each layer.
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal > > &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers.
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph.
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
void VerifyConstantLayerSetTensorInfo () const
 For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
void InferTensorInfos ()
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph.
const std::shared_ptr< IProfiler > & GetProfiler () const
void SetLayersOutOfOrder ()

Static Public Member Functions

template<typename LayerType>
static LayerType * PtrCast (Layer *const layer)

Friends

class SubgraphView

Detailed Description

Definition at line 30 of file Graph.hpp.

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 56 of file Graph.hpp.

◆ ConstIteratorInputs

Definition at line 57 of file Graph.hpp.

◆ ConstIteratorOutputs

Definition at line 58 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 53 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 54 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 50 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( bool shapeInferenceMethod = false,
bool allowExpandedDims = false )
inline

Definition at line 98 of file Graph.hpp.

99 : m_LayersInOrder(true)
100 , m_AllowExpandedDims(allowExpandedDims)
101 , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
102 ShapeInferenceMethod::ValidateOnly)
103 , m_Profiler(std::make_shared<IProfiler>())
104 {}

References armnn::InferAndValidate, and armnn::ValidateOnly.

Referenced by Graph(), Graph(), Graph::InputLayersAccessor::InputLayersAccessor(), Graph::LayerInGraph< ConstantLayer >::LayerInGraph(), Graph::LayerInGraph< ConstantLayer >::LayerInGraph(), Graph::LayerInGraph< InputLayer >::LayerInGraph(), Graph::LayerInGraph< InputLayer >::LayerInGraph(), Graph::LayerInGraph< OutputLayer >::LayerInGraph(), operator=(), operator=(), Graph::OutputLayersAccessor::OutputLayersAccessor(), TopologicalSort(), and TopologicalSort().
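
Example (illustrative sketch only, not part of the generated reference; it assumes the internal Graph.hpp and layer headers are on the include path). Passing true selects ShapeInferenceMethod::InferAndValidate, false selects ShapeInferenceMethod::ValidateOnly:

#include "Graph.hpp"   // internal ArmNN header, not part of the public API

// Construct a graph that infers and validates tensor shapes and that
// tolerates expanded dimensions.
armnn::Graph graph(/*shapeInferenceMethod=*/true, /*allowExpandedDims=*/true);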

◆ Graph() [2/3]

Graph ( const Graph & other)

Definition at line 27 of file Graph.cpp.

28: m_LayersInOrder(other.m_LayersInOrder)
29, m_AllowExpandedDims(other.m_AllowExpandedDims)
30, m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31, m_Profiler(other.m_Profiler)
32{
33 std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34
35 for (auto&& otherLayer : other.m_Layers)
36 {
37 Layer* const layer = otherLayer->Clone(*this);
38 otherToClonedMap.emplace(otherLayer, layer);
39 }
40
41 // Copies slot connections.
42 for (auto&& otherLayer : other.m_Layers)
43 {
44 Layer* const thisLayer = otherToClonedMap[otherLayer];
45
46 auto outputSlot = thisLayer->BeginOutputSlots();
47 for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48 {
49 for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50 {
51 const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52 Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53
54 InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55
56 if (otherInputSlot->IsTensorInfoOverridden())
57 {
58 inputSlot.SetTensorInfo(otherInputSlot->GetTensorInfo());
59 }
60 outputSlot->Connect(inputSlot);
61 }
62 outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
63 ++outputSlot;
64 }
65 }
66}

References Graph().

◆ Graph() [3/3]

Graph ( Graph && other)
inline

Definition at line 110 of file Graph.hpp.

111 {
112 *this = std::move(other);
113 }

References Graph().

◆ ~Graph()

~Graph ( )
inline

Definition at line 142 of file Graph.hpp.

143 {
144 ForEachLayer([](Layer* layer)
145 {
146 delete layer;
147 });
148 }

References ForEachLayer().

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal > > & backends,
TensorHandleFactoryRegistry & registry )

Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers.

Definition at line 330 of file Graph.cpp.

332{
333 // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
334 // connections to other layers).
335 auto MayNeedCompatibilityLayer = [](const Layer& layer)
336 {
337 // All layers should have been associated with a valid compute device at this point.
338 if (layer.GetBackendId() == Compute::Undefined)
339 {
340 throw armnn::Exception("AddCompatibilityLayers: All layers must be assigned to a backend at this point.");
341 }
342 // Does not need another compatibility layer if a copy or import layer is already present.
343 return layer.GetType() != LayerType::MemCopy &&
344 layer.GetType() != LayerType::MemImport;
345 };
346
347 auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
348 {
349 return strategy == EdgeStrategy::CopyToTarget ||
350 strategy == EdgeStrategy::ExportToTarget;
351 };
352
353 ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
354 {
355 ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(srcLayer, "source layer must not be null.");
356
357 if (!MayNeedCompatibilityLayer(*srcLayer))
358 {
359 // The current layer does not need copy layers, move to the next one
360 return;
361 }
362
363 const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
364 for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
365 {
366 OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
367 const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
368 const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
369 for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
370 {
371 InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
372 if (!dstInputSlot)
373 {
374 throw armnn::Exception("dstInputSlot must not be null.");
375 }
376
377 EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
378 if (strategy == EdgeStrategy::Undefined)
379 {
380 throw armnn::Exception("Undefined memory strategy found "
381 "while adding copy layers for compatibility");
382 }
383
384 const Layer& dstLayer = dstInputSlot->GetOwningLayer();
385 if (MayNeedCompatibilityLayer(dstLayer) &&
386 IsCompatibilityStrategy(strategy))
387 {
388 // A copy layer is needed in between the source and destination layers.
389 // Record the operation rather than attempting to modify the graph as we go.
390 // (invalidating iterators)
391 const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
392 srcLayer->GetName(),
393 srcOutputIndex,
394 dstLayer.GetName(),
395 dstInputSlot->GetSlotIndex());
396 Layer* compLayer = nullptr;
397 if (strategy == EdgeStrategy::CopyToTarget)
398 {
399 compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
400 }
401 else
402 {
403 if (strategy != EdgeStrategy::ExportToTarget)
404 {
405 throw armnn::Exception("Invalid edge strategy found.");
406 }
407
408 compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
409 }
410
411 compLayer->SetBackendId(dstLayer.GetBackendId());
412
413 OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
414 auto backendIt = backends.find(dstLayer.GetBackendId());
415 if (backendIt != backends.end() &&
416 backendIt->second &&
417 backendIt->second->SupportsTensorAllocatorAPI())
418 {
419 auto backend = backendIt->second.get();
420 auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
421 bool found = false;
422
423 for (auto preference : tensorHandleFactoryIds)
424 {
425 auto factory = registry.GetFactory(preference);
426 if (factory)
427 {
428 auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
429 auto srcFactory = registry.GetFactory(srcPref);
430
431 if (srcFactory)
432 {
433 bool canExportImport =
434 (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
435
436 if (factory->SupportsMapUnmap() || canExportImport)
437 {
438 compOutputSlot.SetTensorHandleFactory(preference);
439 found = true;
440 break;
441 }
442 }
443 }
444 }
445
446 if (!found)
447 {
448 compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
449 }
450 }
451 else
452 {
453 compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
454 }
455
456 // The output strategy of a compatibility layer is always DirectCompatibility.
457 compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
458
459 // Recalculate the connection index on the previous layer as we have just inserted into it.
460 const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
461 auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
462 std::find(newSourceConnections.begin(),
463 newSourceConnections.end(),
464 &compLayer->GetInputSlot(0)));
465
466 // The input strategy of a compatibility layer is always DirectCompatibilty.
467 srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
468 EdgeStrategy::DirectCompatibility);
469 }
470 }
471 }
472 });
473}

References AddCompatibilityLayers(), ARMNN_THROW_INVALIDARG_MSG_IF_FALSE, armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetInputSlot(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), InsertNewLayer(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, armnn::numeric_cast(), Layer::SetBackendId(), OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by AddCompatibilityLayers(), and IOptimizedNetwork::Optimize.

◆ AddLayer()

template<typename LayerT, typename... Args>
LayerT * AddLayer ( Args &&... args)
inline

Adds a new layer of type LayerT, constructed with the arguments passed, to the graph.

Definition at line 466 of file Graph.hpp.

467{
468 m_LayersInOrder = m_LayersInOrder &&
469 ((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
470 LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
471
472 layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
473 layer->SetAllowExpandedDims(m_AllowExpandedDims);
474
475 NotifyObservables(GraphEvent::LayerAdded, layer);
476
477 return layer;
478}

References armnn::Input, armnn::LayerAdded, armnn::LayerEnumOf(), and armnn::Output.

Referenced by Layer::CloneBase(), and FuseBatchNorm< ConvLayer, ArmnnType, T >::Run().
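
Example (illustrative sketch; armnn::InputLayer, armnn::ActivationLayer and armnn::OutputLayer are existing internal layer types, and the arguments after the template parameter are forwarded to the layer constructor):

armnn::Graph graph;

// Add an input, an activation and an output layer; binding id 0 is used for
// both the input and the output in this sketch.
armnn::InputLayer* input = graph.AddLayer<armnn::InputLayer>(0, "input");
armnn::ActivationDescriptor reluDesc;
armnn::ActivationLayer* relu = graph.AddLayer<armnn::ActivationLayer>(reluDesc, "relu");
armnn::OutputLayer* output = graph.AddLayer<armnn::OutputLayer>(0, "output");

// Wire the layers together.
input->GetOutputSlot().Connect(relu->GetInputSlot(0));
relu->GetOutputSlot().Connect(output->GetInputSlot(0));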

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under output tensor handlers of each layer.

Definition at line 207 of file Graph.cpp.

208{
209 // Layers must be sorted in topological order
210 ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_LayersInOrder, "layers must be in order.");
211
212 ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
213
214 std::unordered_set<const ITensorHandle*> preallocatedTensors;
215 std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
216
217 // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
218 // is a TensorHandle, the function just returns it
219 auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
220 {
221 ITensorHandle* ancestor = subTensorHandle;
222 while (ancestor && ancestor->GetParent())
223 {
224 ancestor = ancestor->GetParent();
225 }
226 return ancestor;
227 };
228
229 // Checks whether a TensorHandle has been pre-allocated
230 auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
231 {
232 return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
233 };
234
235 // Constant tensor handles need to last from the beginning of execution till the end,
236 // therefore we pre-allocate them upfront
237 for (auto&& layer : m_Layers)
238 {
239 if (layer->GetType() == LayerType::Constant)
240 {
241 for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
242 {
243 ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
244
245 if (tensorHandle && !IsPreallocated(tensorHandle))
246 {
247 tensorHandle->Allocate();
248 preallocatedTensors.insert(tensorHandle);
249 }
250 }
251 }
252 }
253
254 // Iterate over the network in topological order
255 for (auto&& layer : m_Layers)
256 {
257 // Count the amount of times each output slot references a certain buffer (ITensorHandle).
258 // The first time we encounter a new tensor handle, we start managing its lifetime.
259 for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
260 {
261 ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
262
263 if (tensorHandle && !IsPreallocated(tensorHandle))
264 {
265 unsigned int numConnections = slot->GetNumConnections();
266 if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
267 {
268 handleReferenceCounts[tensorHandle] = numConnections;
269 tensorHandle->Manage();
270 if (handleReferenceCounts[tensorHandle] == 0u)
271 {
272 // if nobody consumes this tensor we call Allocate()
273 tensorHandle->Allocate();
274 }
275 }
276 else
277 {
278 handleReferenceCounts[tensorHandle] += numConnections;
279 }
280 }
281 }
282
283 // Loop through the input slots in the same layer and decrement the reference counter associated
284 // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
285 for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
286 {
287 ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
288 slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
289
290 if (tensorHandle && !IsPreallocated(tensorHandle))
291 {
292 --handleReferenceCounts[tensorHandle];
293
294 if (handleReferenceCounts[tensorHandle] == 0u)
295 {
296 // Stop managing lifetime of tensor handle
297 tensorHandle->Allocate();
298 handleReferenceCounts.erase(tensorHandle);
299 }
300 }
301 }
302 }
303
304 return Status::Success;
305}

References ITensorHandle::Allocate(), AllocateDynamicBuffers(), ARMNN_SCOPED_PROFILING_EVENT, ARMNN_THROW_INVALIDARG_MSG_IF_FALSE, armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), armnn::Success, and armnn::Undefined.

Referenced by AllocateDynamicBuffers().

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const observable,
GraphEvent notifyOnEvent )
inline

Definition at line 226 of file Graph.hpp.

226 {
227 m_Views[notifyOnEvent].emplace_back(observable);
228 }

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 176 of file Graph.hpp.

176{ return m_Layers.begin(); }

Referenced by cbegin(), armnn::IsTfLiteTurboModel(), Graph::LayerInGraph< ConstantLayer >::LayerInGraph(), Graph::LayerInGraph< InputLayer >::LayerInGraph(), IOptimizedNetwork::Optimize, and Optimizer::Pass().

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 181 of file Graph.hpp.

181{ return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

References PtrCast().

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 186 of file Graph.hpp.

186{ return begin(); }

References begin().

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 188 of file Graph.hpp.

188{ return end(); }

References end().

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const observable,
GraphEvent notifyOnEvent )
inline

Definition at line 230 of file Graph.hpp.

230 {
231 m_Views[notifyOnEvent].remove(observable);
232 }

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 178 of file Graph.hpp.

178{ return m_Layers.end(); }

Referenced by cend(), armnn::IsTfLiteTurboModel(), Graph::LayerInGraph< OutputLayer >::LayerInGraph(), IOptimizedNetwork::Optimize, and Optimizer::Pass().

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 183 of file Graph.hpp.

183{ return {m_Layers.end(), &(PtrCast<const Layer>)}; }

References PtrCast().

◆ EraseLayer() [1/2]

void EraseLayer ( Iterator pos)
inline

Deletes the layer at the specified position.

Definition at line 517 of file Graph.hpp.

518{
519 NotifyObservables(GraphEvent::LayerErased, *pos);
520
521 delete *pos;
522}

References armnn::LayerErased.

Referenced by armnn::ApplyBackendOptimizations(), EraseLayer(), Optimizer::Pass(), OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run(), and OptimizeForExclusiveConnectionImpl< BaseType, ChildType, Wrapped >::Run().

◆ EraseLayer() [2/2]

template<typename LayerT>
void EraseLayer ( LayerT *& layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 525 of file Graph.hpp.

526{
527 if (!layer)
528 {
529 throw armnn::NullPointerException("layer must not be null.");
530 }
531
532 EraseLayer(GetPosInGraph(*layer));
533 layer = nullptr;
534}

References EraseLayer(), and GetPosInGraph().
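
Example (illustrative sketch; armnn::FloorLayer is an existing internal layer type used here only for demonstration): the pointer overload removes the layer from the graph, deletes it and resets the caller's pointer.

armnn::FloorLayer* floorLayer = graph.AddLayer<armnn::FloorLayer>("floor");
graph.EraseLayer(floorLayer);   // floorLayer is nullptr after this call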

◆ ForEachLayer()

template<typename Func>
void ForEachLayer ( Func func) const
inline

Definition at line 40 of file Graph.hpp.

41 {
42 for (auto it = m_Layers.begin(); it != m_Layers.end(); )
43 {
44 auto next = std::next(it);
45 func(*it);
46 it = next;
47 }
48 }

Referenced by AddCompatibilityLayers(), armnn::SelectTensorHandleStrategy(), and ~Graph().
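
Example (illustrative sketch): because the next iterator is cached before the functor is invoked, the functor may safely erase the layer it receives.

graph.ForEachLayer([](armnn::Layer* layer)
{
    std::cout << layer->GetName() << std::endl;   // requires <iostream>
});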

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 199 of file Graph.hpp.

199{ return InputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportInputs().
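
Example (illustrative sketch; GetBindingId() is assumed to come from BindableLayer): the accessor dereferences to const InputLayer*, so the bound inputs can be walked in a range-based for loop.

for (const armnn::InputLayer* inputLayer : graph.GetInputLayers())
{
    std::cout << inputLayer->GetBindingId() << " : " << inputLayer->GetName() << std::endl;
}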

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

Definition at line 205 of file Graph.hpp.

205{ return m_Layers.size(); }

Referenced by LoadedNetwork::EnqueueWorkload().

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 195 of file Graph.hpp.

195{ return m_OutputIds.size(); }

Referenced by LoadedNetwork::EnqueueWorkload().

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 203 of file Graph.hpp.

203{ return OutputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportOutputs().

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer & layer)
inline

Gets the position of a layer in the graph.

Definition at line 455 of file Graph.hpp.

456{
457 auto it = m_PosInGraphMap.find(&layer);
458 if (it == m_PosInGraphMap.end())
459 {
460 throw armnn::Exception("unable to find layer in graph map.");
461 }
462 return it->second;
463}

Referenced by EraseLayer(), InsertNewLayer(), InsertNewLayer(), and Optimizer::Pass().

◆ GetProfiler()

const std::shared_ptr< IProfiler > & GetProfiler ( ) const

Definition at line 733 of file Graph.cpp.

734{
735 return m_Profiler;
736}

References GetProfiler().

Referenced by GetProfiler(), and IOptimizedNetwork::Optimize.

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 645 of file Graph.cpp.

646{
647 for (auto&& layer : TopologicalSort())
648 {
649 for (auto&& input : layer->GetInputSlots())
650 {
651 const IOutputSlot* source = input.GetConnectedOutputSlot();
652 if (source == NULL)
653 {
654 // Throws exception due to a layer input not being connected to an output slot.
655 // Verifies input slot weights and bias are set for FullyConnected layers.
656 ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
657 }
658
659 if (!source->IsTensorInfoSet())
660 {
661 std::ostringstream message;
662 message << "Output slot TensorInfo not set on "
663 << GetLayerTypeAsCString(layer->GetType())
664 << " layer "
665 << std::quoted(layer->GetName());
666 throw LayerValidationException(message.str());
667 }
668 }
669
670 if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
671 {
672 layer->ValidateTensorShapesFromInputs();
673 }
674 }
675}

References armnn::GetLayerTypeAsCString(), InferTensorInfos(), IOutputSlot::IsTensorInfoSet(), TopologicalSort(), and armnn::ValidateOnly.

Referenced by InferTensorInfos(), and IOptimizedNetwork::Optimize.
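
Example (illustrative sketch, continuing the AddLayer sketch above): set the TensorInfos the graph will check against, then validate the whole network in topological order.

// ValidateOnly is the default ShapeInferenceMethod chosen by the Graph constructor.
armnn::TensorInfo info({ 1, 3, 224, 224 }, armnn::DataType::Float32);
input->GetOutputSlot().SetTensorInfo(info);
relu->GetOutputSlot().SetTensorInfo(info);

graph.InferTensorInfos();   // throws LayerValidationException if a connected
                            // output slot has no TensorInfo set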

◆ InsertNewLayer() [1/2]

template<typename LayerT, typename... Args>
LayerT * InsertNewLayer ( InputSlot & insertBefore,
Args &&... args )
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 481 of file Graph.hpp.

482{
483 // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
484 OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
485 const Iterator pos = (parentOut != nullptr)
486 ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
487 : GetPosInGraph(insertBefore.GetOwningLayer());
488 LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
489 insertBefore.Insert(*layer);
490
491 NotifyObservables(GraphEvent::LayerAdded, layer);
492
493 return layer;
494}

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by AddCompatibilityLayers(), armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), AddBroadcastReshapeLayerImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), MaxMinIntoBoundedReluImpl::Run(), MovePermuteUpImpl::Run(), MoveTransposeUpImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), and PermuteDepthwiseConv2dWeightsImpl::~PermuteDepthwiseConv2dWeightsImpl().
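
Example (illustrative sketch, continuing the AddLayer sketch above): splice a MemCopyLayer onto the connection feeding the output layer, i.e. between output's input slot and the output slot currently connected to it (the same pattern AddCompatibilityLayers() uses above).

armnn::MemCopyLayer* copyLayer =
    graph.InsertNewLayer<armnn::MemCopyLayer>(output->GetInputSlot(0), "copy");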

◆ InsertNewLayer() [2/2]

template<typename LayerT, typename... Args>
LayerT * InsertNewLayer ( OutputSlot & insertAfter,
Args &&... args )
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 497 of file Graph.hpp.

498{
499 Layer& owningLayer = insertAfter.GetOwningLayer();
500
501 const Iterator pos = std::next(GetPosInGraph(owningLayer));
502 LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
503
504 if (layer->GetNumInputSlots() != 1)
505 {
506 throw armnn::Exception("layer must only one input slot.");
507 }
508
509 insertAfter.MoveAllConnections(layer->GetOutputSlot());
510 insertAfter.Connect(layer->GetInputSlot(0));
511
512 NotifyObservables(GraphEvent::LayerAdded, layer);
513
514 return layer;
515}

References OutputSlot::Connect(), OutputSlot::GetOwningLayer(), GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

◆ operator=() [1/2]

Graph & operator= ( const Graph & other)
delete

References Graph().

◆ operator=() [2/2]

Graph & operator= ( Graph && other)
inline

Definition at line 115 of file Graph.hpp.

116 {
117 m_InputIds = std::move(other.m_InputIds);
118 m_OutputIds = std::move(other.m_OutputIds);
119 m_LayersInOrder = std::move(other.m_LayersInOrder);
120 m_Views = std::move(other.m_Views);
121 m_Profiler = std::move(other.m_Profiler);
122 m_AllowExpandedDims = other.m_AllowExpandedDims;
123 m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
124 other.ForEachLayer([this](Layer* otherLayer)
125 {
126 otherLayer->Reparent(*this, m_Layers.end());
127 });
128
129 if (!other.m_PosInGraphMap.empty())
130 {
131 throw armnn::Exception("assignment positions in graph map must be empty.");
132 }
133
134 if (!other.m_Layers.empty())
135 {
136 throw armnn::Exception("assignment layers must be empty.");
137 }
138
139 return *this;
140 }

References Graph(), and Layer::Reparent().

◆ Print()

Status Print ( bool extended = false) const

Definition at line 68 of file Graph.cpp.

69{
70 if (m_Layers.empty())
71 {
72 ARMNN_LOG(info) << "\n Graph is empty.\n";
73 return Status::Success;
74 }
75 ARMNN_LOG(info) << "\n";
76 ARMNN_LOG(info) << "Walking Pattern: \n";
77
78 for (auto&& it : TopologicalSort())
79 {
80 auto numInputSlots = it->GetNumInputSlots();
81 auto numOutputSlots = it->GetNumOutputSlots();
82
83 std::string guid;
84 if (extended)
85 {
86 guid += ":";
87 guid += std::to_string(it->GetGuid());
88 }
89 ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
90 << ":" << it->GetBackendId().Get()
91 << guid
92 << " has " << numInputSlots << " input slots"
93 << " and " << numOutputSlots << " output slots.";
94
95 for (auto i : it->GetInputSlots())
96 {
97 std::ostringstream message;
98 auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
99 unsigned int numDims = inputTensorShape.GetNumDimensions();
100
101 message << "The input slot has shape [ ";
102 for (unsigned int dim=0; dim < numDims; dim++)
103 {
104 message << inputTensorShape[dim] << ",";
105 }
106 message << " ]";
107 if (extended)
108 {
109 message << " Scale: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationScale();
110 message << " Offset: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationOffset();
111 message << " The input slot is connected to: ";
112 message << i.GetConnectedOutputSlot()->GetOwningIConnectableLayer().GetGuid();
113 }
114 ARMNN_LOG(info) << message.str();
115 }
116
117 for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
118 {
119 const armnn::Layer *layer = it;
120 std::ostringstream message;
121 auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
122 unsigned int numDims = outputTensorShape.GetNumDimensions();
123
124 message << "The output slot has shape [ ";
125 for (unsigned int dim=0; dim < numDims; dim++)
126 {
127 message << outputTensorShape[dim] << ",";
128 }
129 message << " ]";
130 if (extended)
131 {
132 message << " Scale: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationScale();
133 message << " Offset: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationOffset();
134 message << " The output slot is connected to: ";
135 message << layer->GetOutputSlots()[i].GetConnection(0)->GetOwningIConnectableLayer().GetGuid();
136 }
137 ARMNN_LOG(info) << message.str();
138 }
139 ARMNN_LOG(info) << "\n";
140 }
141 ARMNN_LOG(info) << "\n\n";
142
143 return Status::Success;
144}

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), Layer::GetOutputSlots(), armnn::info, Print(), armnn::Success, and TopologicalSort().

Referenced by CheckOrder(), and Print().
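
Example (illustrative sketch): Print() writes through ARMNN_LOG at info severity, so logging has to be enabled first; passing true also prints guids and quantization parameters.

armnn::ConfigureLogging(true, false, armnn::LogSeverity::Info);
graph.Print(/*extended=*/true);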

◆ PtrCast()

template<typename LayerType>
LayerType * PtrCast ( Layer *const layer)
inlinestatic

Definition at line 34 of file Graph.hpp.

35 {
36 return PolymorphicDowncast<LayerType*>(layer);
37 }

References armnn::PolymorphicDowncast().

Referenced by begin(), Graph::InputLayersAccessor::begin(), Graph::OutputLayersAccessor::begin(), end(), Graph::InputLayersAccessor::end(), and Graph::OutputLayersAccessor::end().

◆ SerializeToDot()

Status SerializeToDot ( std::ostream & stream)

Definition at line 146 of file Graph.cpp.

147{
148 {
149 DotGraph graph(stream, "Optimized");
150
151 {
152 // Default node attributes:
153 DotDefaults nodes(stream, "node");
154 nodes.GetAttributeSet()
155 .AddAttribute("shape", "record");
156 }
157
158 {
159 // Default edge attributes:
160 DotDefaults edges(stream, "edge");
161 edges.GetAttributeSet()
162 .AddAttribute("fontsize", 8)
163 .AddAttribute("fontcolor", "blue")
164 .AddAttribute("fontname", "arial-bold");
165 }
166
167 // First declares the nodes.
168 for (auto&& layer : m_Layers)
169 {
170 DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
171 // Extracts the layer parameters.
172 ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
173 node.GetContents().AddContent(name + " : " + value);
174 };
175 layer->SerializeLayerParameters(extractParams);
176 }
177
178 // Second declares the edges.
179 for (auto&& layer : m_Layers)
180 {
181 LayerGuid toId = layer->GetGuid();
182
183 for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
184 {
185 OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
186 LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
187 DotEdge edge(stream, fromId, toId);
188
189 // Now print the tensor shape on the edge.
190 {
191 // Constructs the label attribute with HTML markup.
192 std::stringstream ss;
193 ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
194 edge.GetAttributeSet().AddAttribute("label", ss);
195 }
196 }
197 }
198 }
199
200 if (stream.bad())
201 {
202 return Status::Failure;
203 }
204 return Status::Success;
205}

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotDefaults::GetAttributeSet(), DotEdge::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), SerializeToDot(), and armnn::Success.

Referenced by SerializeToDot().
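
Example (illustrative sketch; the file name is arbitrary): write the graph as a GraphViz dot description.

std::ofstream dotStream("graph.dot");          // requires <fstream>
if (graph.SerializeToDot(dotStream) != armnn::Status::Success)
{
    std::cerr << "Failed to serialize graph to dot" << std::endl;
}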

◆ SetLayersOutOfOrder()

void SetLayersOutOfOrder ( )

Definition at line 738 of file Graph.cpp.

739{
740 m_LayersInOrder = false;
741}

References SetLayersOutOfOrder().

Referenced by SetLayersOutOfOrder().

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView & subgraph,
const SubgraphView & substituteSubgraph )

Definition at line 486 of file Graph.cpp.

487{
488 // Look through each layer in the new subgraph and add any that are not already a member of this graph
489 substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
490 {
491 if (std::find(std::begin(m_Layers),
492 std::end(m_Layers),
493 iConnectableLayer) == std::end(m_Layers))
494 {
495 auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
496 layer->Reparent(*this, m_Layers.end());
497 m_LayersInOrder = false;
498 }
499 });
500
501 ReplaceSubgraphConnections(subgraph, substituteSubgraph);
502 EraseSubgraphLayers(subgraph);
503 TopologicalSort();
504}

References SubgraphView::ForEachIConnectableLayer(), armnn::PolymorphicDowncast(), SubgraphView, SubstituteSubgraph(), and TopologicalSort().

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView & subgraph,
IConnectableLayer * substituteLayer )

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 475 of file Graph.cpp.

476{
477 ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(substituteLayer, "substituteLayer should not be null");
478
479 // Create a new sub-graph with only the given layer, using
480 // the given sub-graph as a reference of which parent graph to use
481 SubgraphView substituteSubgraph(substituteLayer);
482
483 SubstituteSubgraph(subgraph, substituteSubgraph);
484}

References ARMNN_THROW_INVALIDARG_MSG_IF_FALSE, SubgraphView, and SubstituteSubgraph().

Referenced by armnn::ApplyBackendOptimizations(), SubstituteSubgraph(), and SubstituteSubgraph().

◆ TopologicalSort() [1/2]

Graph & TopologicalSort ( )
inline

Sorts layers in topological order and returns this.

Definition at line 191 of file Graph.hpp.

191{ const_cast<const Graph*>(this)->TopologicalSort(); return *this; }

References Graph(), and TopologicalSort().

Referenced by CheckOrder(), LoadedNetwork::ImportInputs(), LoadedNetwork::ImportOutputs(), InferTensorInfos(), Optimizer::Pass(), Print(), LoadedNetwork::SendNetworkStructure(), SubstituteSubgraph(), TopologicalSort(), TopologicalSort(), and VerifyConstantLayerSetTensorInfo().
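
Example (illustrative sketch): sort in place, then walk the layers; each layer is visited only after every layer feeding its input slots.

for (const armnn::Layer* layer : graph.TopologicalSort())
{
    // All producers of this layer's inputs have already been visited.
}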

◆ TopologicalSort() [2/2]

const Graph & TopologicalSort ( ) const

Definition at line 307 of file Graph.cpp.

308{
309 if (!m_LayersInOrder)
310 {
311 // Resets layer order.
312 for (auto&& it : m_Layers)
313 {
314 it->ResetPriority();
315 }
316
317 auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
318 {
319 return layerA->GetPriority() < layerB->GetPriority();
320 };
321
322 m_Layers.sort(compareLayerPriority);
323
324 m_LayersInOrder = true;
325 }
326
327 return *this;
328}

References Graph(), and TopologicalSort().

◆ VerifyConstantLayerSetTensorInfo()

void VerifyConstantLayerSetTensorInfo ( ) const

For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.

LayerValidationException thrown if no TensorInfo is set.

Exceptions
LayerValidationException

Definition at line 622 of file Graph.cpp.

623{
624 for (auto&& layer : TopologicalSort())
625 {
626 if (layer->GetType() == armnn::LayerType::Constant)
627 {
628 for (auto&& output: layer->GetOutputSlots())
629 {
630 if (!output.IsTensorInfoSet())
631 {
632 std::ostringstream message;
633 message << "Output slot TensorInfo not set on "
634 << GetLayerTypeAsCString(layer->GetType())
635 << " layer \""
636 << layer->GetName()
637 << "\"";
638 throw LayerValidationException(message.str());
639 }
640 }
641 }
642 }
643}

References armnn::Constant, armnn::GetLayerTypeAsCString(), TopologicalSort(), and VerifyConstantLayerSetTensorInfo().

Referenced by IOptimizedNetwork::Optimize, and VerifyConstantLayerSetTensorInfo().

◆ SubgraphView

friend class SubgraphView
friend

Definition at line 307 of file Graph.hpp.

References SubgraphView.

Referenced by SubgraphView, SubstituteSubgraph(), and SubstituteSubgraph().


The documentation for this class was generated from the following files: