ArmNN
 24.08
Graph.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include <Graph.hpp>
#include <SubgraphView.hpp>
#include <LayersFwd.hpp>

#include <armnn/backends/IBackendInternal.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <fmt/format.h>

#include <DotSerializer.hpp>

#include <algorithm>
#include <iomanip>
#include <iterator>
#include <sstream>
#include <unordered_map>
#include <unordered_set>
23 
24 namespace armnn
25 {
26 
27 Graph::Graph(const Graph& other)
28 : m_LayersInOrder(other.m_LayersInOrder)
29 , m_AllowExpandedDims(other.m_AllowExpandedDims)
30 , m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31 , m_Profiler(other.m_Profiler)
32 {
33  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34 
35  for (auto&& otherLayer : other.m_Layers)
36  {
37  Layer* const layer = otherLayer->Clone(*this);
38  otherToClonedMap.emplace(otherLayer, layer);
39  }
40 
41  // Copies slot connections.
42  for (auto&& otherLayer : other.m_Layers)
43  {
44  Layer* const thisLayer = otherToClonedMap[otherLayer];
45 
46  auto outputSlot = thisLayer->BeginOutputSlots();
47  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48  {
49  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50  {
51  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53 
54  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55 
56  if (otherInputSlot->IsTensorInfoOverridden())
57  {
58  inputSlot.SetTensorInfo(otherInputSlot->GetTensorInfo());
59  }
60  outputSlot->Connect(inputSlot);
61  }
62  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
63  ++outputSlot;
64  }
65  }
66 }
67 
68 Status Graph::Print(bool extended) const
69 {
70  if (m_Layers.empty())
71  {
72  ARMNN_LOG(info) << "\n Graph is empty.\n";
73  return Status::Success;
74  }
75  ARMNN_LOG(info) << "\n";
76  ARMNN_LOG(info) << "Walking Pattern: \n";
77 
78  for (auto&& it : TopologicalSort())
79  {
80  auto numInputSlots = it->GetNumInputSlots();
81  auto numOutputSlots = it->GetNumOutputSlots();
82 
83  std::string guid;
84  if (extended)
85  {
86  guid += ":";
87  guid += std::to_string(it->GetGuid());
88  }
89  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
90  << ":" << it->GetBackendId().Get()
91  << guid
92  << " has " << numInputSlots << " input slots"
93  << " and " << numOutputSlots << " output slots.";
94 
95  for (auto i : it->GetInputSlots())
96  {
97  std::ostringstream message;
98  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
99  unsigned int numDims = inputTensorShape.GetNumDimensions();
100 
101  message << "The input slot has shape [ ";
102  for (unsigned int dim=0; dim < numDims; dim++)
103  {
104  message << inputTensorShape[dim] << ",";
105  }
106  message << " ]";
107  if (extended)
108  {
109  message << " Scale: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationScale();
110  message << " Offset: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationOffset();
111  message << " The input slot is connected to: ";
112  message << i.GetConnectedOutputSlot()->GetOwningIConnectableLayer().GetGuid();
113  }
114  ARMNN_LOG(info) << message.str();
115  }
116 
117  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
118  {
119  const armnn::Layer *layer = it;
120  std::ostringstream message;
121  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
122  unsigned int numDims = outputTensorShape.GetNumDimensions();
123 
124  message << "The output slot has shape [ ";
125  for (unsigned int dim=0; dim < numDims; dim++)
126  {
127  message << outputTensorShape[dim] << ",";
128  }
129  message << " ]";
130  if (extended)
131  {
132  message << " Scale: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationScale();
133  message << " Offset: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationOffset();
134  message << " The output slot is connected to: ";
135  message << layer->GetOutputSlots()[i].GetConnection(0)->GetOwningIConnectableLayer().GetGuid();
136  }
137  ARMNN_LOG(info) << message.str();
138  }
139  ARMNN_LOG(info) << "\n";
140  }
141  ARMNN_LOG(info) << "\n\n";
142 
143  return Status::Success;
144 }
145 
146 Status Graph::SerializeToDot(std::ostream& stream)
147 {
148  {
149  DotGraph graph(stream, "Optimized");
150 
151  {
152  // Default node attributes:
153  DotDefaults nodes(stream, "node");
154  nodes.GetAttributeSet()
155  .AddAttribute("shape", "record");
156  }
157 
158  {
159  // Default edge attributes:
160  DotDefaults edges(stream, "edge");
161  edges.GetAttributeSet()
162  .AddAttribute("fontsize", 8)
163  .AddAttribute("fontcolor", "blue")
164  .AddAttribute("fontname", "arial-bold");
165  }
166 
167  // First declares the nodes.
168  for (auto&& layer : m_Layers)
169  {
170  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
171  // Extracts the layer parameters.
172  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
173  node.GetContents().AddContent(name + " : " + value);
174  };
175  layer->SerializeLayerParameters(extractParams);
176  }
177 
178  // Second declares the edges.
179  for (auto&& layer : m_Layers)
180  {
181  LayerGuid toId = layer->GetGuid();
182 
183  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
184  {
185  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
186  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
187  DotEdge edge(stream, fromId, toId);
188 
189  // Now print the tensor shape on the edge.
190  {
191  // Constructs the label attribute with HTML markup.
192  std::stringstream ss;
193  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
194  edge.GetAttributeSet().AddAttribute("label", ss);
195  }
196  }
197  }
198  }
199 
200  if (stream.bad())
201  {
202  return Status::Failure;
203  }
204  return Status::Success;
205 }
206 
208 {
209  // Layers must be sorted in topological order
210  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_LayersInOrder, "layers must be in order.");
211 
212  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
213 
214  std::unordered_set<const ITensorHandle*> preallocatedTensors;
215  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
216 
217  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
218  // is a TensorHandle, the function just returns it
219  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
220  {
221  ITensorHandle* ancestor = subTensorHandle;
222  while (ancestor && ancestor->GetParent())
223  {
224  ancestor = ancestor->GetParent();
225  }
226  return ancestor;
227  };
228 
229  // Checks whether a TensorHandle has been pre-allocated
230  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
231  {
232  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
233  };
234 
235  // Constant tensor handles need to last from the beginning of execution till the end,
236  // therefore we pre-allocate them upfront
237  for (auto&& layer : m_Layers)
238  {
239  if (layer->GetType() == LayerType::Constant)
240  {
241  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
242  {
243  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
244 
245  if (tensorHandle && !IsPreallocated(tensorHandle))
246  {
247  tensorHandle->Allocate();
248  preallocatedTensors.insert(tensorHandle);
249  }
250  }
251  }
252  }
253 
254  // Iterate over the network in topological order
255  for (auto&& layer : m_Layers)
256  {
257  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
258  // The first time we encounter a new tensor handle, we start managing its lifetime.
259  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
260  {
261  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
262 
263  if (tensorHandle && !IsPreallocated(tensorHandle))
264  {
265  unsigned int numConnections = slot->GetNumConnections();
266  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
267  {
268  handleReferenceCounts[tensorHandle] = numConnections;
269  tensorHandle->Manage();
270  if (handleReferenceCounts[tensorHandle] == 0u)
271  {
272  // if nobody consumes this tensor we call Allocate()
273  tensorHandle->Allocate();
274  }
275  }
276  else
277  {
278  handleReferenceCounts[tensorHandle] += numConnections;
279  }
280  }
281  }
282 
283  // Loop through the input slots in the same layer and decrement the reference counter associated
284  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
285  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
286  {
287  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
288  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
289 
290  if (tensorHandle && !IsPreallocated(tensorHandle))
291  {
292  --handleReferenceCounts[tensorHandle];
293 
294  if (handleReferenceCounts[tensorHandle] == 0u)
295  {
296  // Stop managing lifetime of tensor handle
297  tensorHandle->Allocate();
298  handleReferenceCounts.erase(tensorHandle);
299  }
300  }
301  }
302  }
303 
304  return Status::Success;
305 }
306 
307 const Graph& Graph::TopologicalSort() const
308 {
309  if (!m_LayersInOrder)
310  {
311  // Resets layer order.
312  for (auto&& it : m_Layers)
313  {
314  it->ResetPriority();
315  }
316 
317  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
318  {
319  return layerA->GetPriority() < layerB->GetPriority();
320  };
321 
322  m_Layers.sort(compareLayerPriority);
323 
324  m_LayersInOrder = true;
325  }
326 
327  return *this;
328 }
329 
330 void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendInternal>>& backends,
331  TensorHandleFactoryRegistry& registry)
332 {
333  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
334  // connections to other layers).
335  auto MayNeedCompatibilityLayer = [](const Layer& layer)
336  {
337  // All layers should have been associated with a valid compute device at this point.
338  if (layer.GetBackendId() == Compute::Undefined)
339  {
340  throw armnn::Exception("AddCompatibilityLayers: All layers must be assigned to a backend at this point.");
341  }
342  // Does not need another compatibility layer if a copy or import layer is already present.
343  return layer.GetType() != LayerType::MemCopy &&
344  layer.GetType() != LayerType::MemImport;
345  };
346 
347  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
348  {
349  return strategy == EdgeStrategy::CopyToTarget ||
350  strategy == EdgeStrategy::ExportToTarget;
351  };
352 
353  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
354  {
355  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(srcLayer, "source layer must not be null.");
356 
357  if (!MayNeedCompatibilityLayer(*srcLayer))
358  {
359  // The current layer does not need copy layers, move to the next one
360  return;
361  }
362 
363  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
364  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
365  {
366  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
367  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
368  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
369  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
370  {
371  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
372  if (!dstInputSlot)
373  {
374  throw armnn::Exception("dstInputSlot must not be null.");
375  }
376 
377  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
378  if (strategy == EdgeStrategy::Undefined)
379  {
380  throw armnn::Exception("Undefined memory strategy found "
381  "while adding copy layers for compatibility");
382  }
383 
384  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
385  if (MayNeedCompatibilityLayer(dstLayer) &&
386  IsCompatibilityStrategy(strategy))
387  {
388  // A copy layer is needed in between the source and destination layers.
389  // Record the operation rather than attempting to modify the graph as we go.
390  // (invalidating iterators)
391  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
392  srcLayer->GetName(),
393  srcOutputIndex,
394  dstLayer.GetName(),
395  dstInputSlot->GetSlotIndex());
396  Layer* compLayer = nullptr;
397  if (strategy == EdgeStrategy::CopyToTarget)
398  {
399  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
400  }
401  else
402  {
403  if (strategy != EdgeStrategy::ExportToTarget)
404  {
405  throw armnn::Exception("Invalid edge strategy found.");
406  }
407 
408  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
409  }
410 
411  compLayer->SetBackendId(dstLayer.GetBackendId());
412 
413  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
414  auto backendIt = backends.find(dstLayer.GetBackendId());
415  if (backendIt != backends.end() &&
416  backendIt->second &&
417  backendIt->second->SupportsTensorAllocatorAPI())
418  {
419  auto backend = backendIt->second.get();
420  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
421  bool found = false;
422 
423  for (auto preference : tensorHandleFactoryIds)
424  {
425  auto factory = registry.GetFactory(preference);
426  if (factory)
427  {
428  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
429  auto srcFactory = registry.GetFactory(srcPref);
430 
431  if (srcFactory)
432  {
433  bool canExportImport =
434  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
435 
436  if (factory->SupportsMapUnmap() || canExportImport)
437  {
438  compOutputSlot.SetTensorHandleFactory(preference);
439  found = true;
440  break;
441  }
442  }
443  }
444  }
445 
446  if (!found)
447  {
449  }
450  }
451  else
452  {
454  }
455 
456  // The output strategy of a compatibility layer is always DirectCompatibility.
458 
459  // Recalculate the connection index on the previous layer as we have just inserted into it.
460  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
461  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
462  std::find(newSourceConnections.begin(),
463  newSourceConnections.end(),
464  &compLayer->GetInputSlot(0)));
465 
466  // The input strategy of a compatibility layer is always DirectCompatibilty.
467  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
469  }
470  }
471  }
472  });
473 }
474 
476 {
477  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(substituteLayer, "substituteLayer should not be null");
478 
479  // Create a new sub-graph with only the given layer, using
480  // the given sub-graph as a reference of which parent graph to use
481  SubgraphView substituteSubgraph(substituteLayer);
482 
483  SubstituteSubgraph(subgraph, substituteSubgraph);
484 }
485 
486 void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
487 {
488  // Look through each layer in the new subgraph and add any that are not already a member of this graph
489  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
490  {
491  if (std::find(std::begin(m_Layers),
492  std::end(m_Layers),
493  iConnectableLayer) == std::end(m_Layers))
494  {
495  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
496  layer->Reparent(*this, m_Layers.end());
497  m_LayersInOrder = false;
498  }
499  });
500 
501  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
502  EraseSubgraphLayers(subgraph);
503  TopologicalSort();
504 }
505 
506 void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
507 {
508  if (substituteSubgraph.GetIConnectableLayers().empty())
509  {
510  throw armnn::Exception("New sub-graph used for substitution must not be empty");
511  }
512 
513  const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
514  std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
515  {
516  layer = PolymorphicDowncast<Layer*>(layer);
517  if (std::find(m_Layers.begin(), m_Layers.end(), layer) == m_Layers.end())
518  {
519  throw armnn::Exception("Substitute layer is not a member of graph");
520  }
521  });
522 
523  const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
524  const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraph.GetIOutputSlots();
525 
526  unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
527  unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());
528 
529  const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
530  const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();
531 
532  if (subgraphNumInputSlots != substituteSubgraphInputSlots.size())
533  {
534  throw armnn::Exception("subgraph and substitute subgraph input slot sizes must be the same.");
535  }
536 
537  if (subgraphNumOutputSlots != substituteSubgraphOutputSlots.size())
538  {
539  throw armnn::Exception("subgraph and substitute subgraph output slot sizes must be the same.");
540  }
541 
542  // Disconnect the sub-graph and replace it with the substitute sub-graph
543 
544  // Step 1: process input slots
545  for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
546  {
547  IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
548  if (!subgraphInputSlot)
549  {
550  throw armnn::NullPointerException("subgraphInputSlot must not be null.");
551  }
552 
553  // Only disconnect if the InputSlot has a connection, this might not be the case when
554  // dealing with working copies of SubgraphViews
555  // Note: we don't need this check for OutputSlot as it iterates over a vector of valid connections
556  if (subgraphInputSlot->GetConnection())
557  {
558  IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
559  InputSlot* inputSlot = PolymorphicDowncast<InputSlot*>(subgraphInputSlot);
560  bool isOverridden = inputSlot->IsTensorInfoOverridden();
561 
562  if (!connectedOutputSlot)
563  {
564  throw armnn::NullPointerException("connectedOutputSlot must not be null.");
565  }
566 
567  connectedOutputSlot->Disconnect(*subgraphInputSlot);
568 
569  IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
570  if (!substituteInputSlot)
571  {
572  throw armnn::NullPointerException("substituteInputSlot must not be null.");
573  }
574 
575  connectedOutputSlot->Connect(*substituteInputSlot);
576 
577  if (isOverridden)
578  {
579  TensorInfo overridden = inputSlot->GetTensorInfo();
580  InputSlot* newInputSlot = PolymorphicDowncast<InputSlot*>(substituteInputSlot);
581  newInputSlot->SetTensorInfo(overridden);
582  }
583  }
584  }
585 
586  // Step 2: process output slots
587  for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
588  {
589  auto subgraphOutputSlot =
590  PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
591  if (!subgraphOutputSlot)
592  {
593  throw armnn::NullPointerException("subgraphOutputSlot must not be null.");
594  }
595 
596  auto substituteOutputSlot =
597  PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
598  if (!substituteOutputSlot)
599  {
600  throw armnn::NullPointerException("substituteOutputSlot must not be null.");
601  }
602 
603  subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
604  }
605 }
606 
607 void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
608 {
609 
610  for (auto iConnectableLayer : subgraph.GetIConnectableLayers())
611  {
612  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
613  EraseLayer(layer);
614  }
615  subgraph.Clear();
616 }
617 
618 /// For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
619 /// LayerValidationException thrown if no TensorInfo is set.
620 ///
621 /// @throws LayerValidationException
622 void Graph::VerifyConstantLayerSetTensorInfo() const
623 {
624  for (auto&& layer : TopologicalSort())
625  {
626  if (layer->GetType() == armnn::LayerType::Constant)
627  {
628  for (auto&& output: layer->GetOutputSlots())
629  {
630  if (!output.IsTensorInfoSet())
631  {
632  std::ostringstream message;
633  message << "Output slot TensorInfo not set on "
634  << GetLayerTypeAsCString(layer->GetType())
635  << " layer \""
636  << layer->GetName()
637  << "\"";
638  throw LayerValidationException(message.str());
639  }
640  }
641  }
642  }
643 }
644 
645 void Graph::InferTensorInfos()
646 {
647  for (auto&& layer : TopologicalSort())
648  {
649  for (auto&& input : layer->GetInputSlots())
650  {
651  const IOutputSlot* source = input.GetConnectedOutputSlot();
652  if (source == NULL)
653  {
654  // Throws exception due to a layer input not being connected to an output slot.
655  // Verifies input slot weights and bias are set for FullyConnected layers.
656  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
657  }
658 
659  if (!source->IsTensorInfoSet())
660  {
661  std::ostringstream message;
662  message << "Output slot TensorInfo not set on "
663  << GetLayerTypeAsCString(layer->GetType())
664  << " layer "
665  << std::quoted(layer->GetName());
666  throw LayerValidationException(message.str());
667  }
668  }
669 
670  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
671  {
672  layer->ValidateTensorShapesFromInputs();
673  }
674  }
675 }
676 
677 /// Throws exception due to a layer input not being connected to an output slot.
678 /// Verifies weights and bias are set for layers on input slots 1
679 /// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
680 ///
681 /// @param layer constant pointer to a Layer object
682 /// @param slotIndex input slot index of layer
683 /// @throws LayerValidationException
684 void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
685  unsigned int slotIndex)
686 {
687  std::ostringstream message;
688  bool noWeightsAndBias = false;
689 
690  if ((layer->GetType() == armnn::LayerType::FullyConnected ||
693  layer->GetType() == armnn::LayerType::DepthwiseConvolution2d) && slotIndex > 0)
694  {
695  message << std::endl;
696 
697  // If weights are not set and is bias enabled, also check if bias is set
698  if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
699  {
700  const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
701  if (biasSource == NULL)
702  {
703  message << "Weights and bias layers not set." << std::endl;
704  noWeightsAndBias = true;
705  }
706  }
707 
708  // Only weights or bias are not set
709  if (!noWeightsAndBias)
710  {
711  if (slotIndex == 1)
712  {
713  message << "Weights layer not set." << std::endl;
714  }
715  else
716  {
717  message << "Bias layer not set." << std::endl;
718  }
719  }
720  }
721 
722  std::string slotString = noWeightsAndBias ? "1 & 2" : std::to_string(slotIndex);
723  message << "Input slot(s) "
724  << slotString
725  << " for "
726  << GetLayerTypeAsCString(layer->GetType())
727  << " not connected to an output slot. " << std::endl
728  << "Layer name: "
729  << std::quoted(layer->GetName());
730  throw LayerValidationException(message.str());
731 }
732 
733 const std::shared_ptr<IProfiler>& Graph::GetProfiler() const
734 {
735  return m_Profiler;
736 }
737 
738 void Graph::SetLayersOutOfOrder()
739 {
740  m_LayersInOrder = false;
741 }
742 
743 } // namespace armnn
armnn::OutputSlot::GetEdgeStrategies
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition: Layer.hpp:146
armnn::Compute::Undefined
@ Undefined
armnn::OutputSlot::SetTensorHandleFactory
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:213
armnn::InputSlot::SetTensorInfo
void SetTensorInfo(const TensorInfo tensorInfo) override
Sets the TensorInfo for this InputSlot.
Definition: Layer.cpp:609
armnn::SubgraphView::IOutputSlots
std::vector< IOutputSlot * > IOutputSlots
Definition: SubgraphView.hpp:60
armnn::ITensorHandle::Manage
virtual void Manage()=0
Indicate to the memory manager that this resource is active.
armnn::GetLayerTypeAsCString
const char * GetLayerTypeAsCString(LayerType type)
Definition: InternalTypes.cpp:13
armnn::Graph::AllocateDynamicBuffers
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handers of each layer.
Definition: Graph.cpp:207
armnn::DotAttributeSet::AddAttribute
DotAttributeSet & AddAttribute(const std::string &name, const std::stringstream &value)
Definition: DotSerializer.cpp:95
armnn::InputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:53
armnn::Graph::ForEachLayer
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:100
armnn::EdgeStrategy::DirectCompatibility
@ DirectCompatibility
No strategy has been defined. Used internally to verify integrity of optimizations.
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::OutputSlot
Definition: Layer.hpp:100
armnn::Graph::SubstituteSubgraph
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:475
TypesUtils.hpp
armnn::TensorHandleFactoryRegistry::GetFactory
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
Definition: TensorHandleFactoryRegistry.cpp:39
Graph.hpp
BackendId.hpp
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
DotSerializer.hpp
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
NumericCast.hpp
armnn::DotEdge
Definition: DotSerializer.hpp:77
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
armnn::Layer
Definition: Layer.hpp:230
ARMNN_LOG
#define ARMNN_LOG(severity)
Definition: Logging.hpp:212
armnn::EdgeStrategy::CopyToTarget
@ CopyToTarget
Source backends tensor data can be exported to destination backend tensor without copy.
Assert.hpp
armnn::InputSlot::GetSlotIndex
unsigned int GetSlotIndex() const override
Definition: Layer.hpp:54
armnn::SubgraphView::IConnectableLayers
std::list< IConnectableLayer * > IConnectableLayers
Definition: SubgraphView.hpp:62
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:53
armnn::OutputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:132
armnn::DotNode::GetContents
NodeContent & GetContents()
Definition: DotSerializer.hpp:107
armnn::SubgraphView::GetIConnectableLayers
const IConnectableLayers & GetIConnectableLayers() const
Definition: SubgraphView.cpp:281
armnn::EdgeStrategy::Undefined
@ Undefined
armnn::NodeContent::AddContent
NodeContent & AddContent(const std::string &content)
Definition: DotSerializer.cpp:147
Logging.hpp
armnn::DotGraph
Definition: DotSerializer.hpp:125
ARMNN_SCOPED_PROFILING_EVENT
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
IBackendInternal.hpp
LayersFwd.hpp
armnn::ITensorHandle::GetParent
virtual ITensorHandle * GetParent() const =0
Get the parent tensor if this is a subtensor.
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::DotNode
Definition: DotSerializer.hpp:101
armnn::Graph::Print
Status Print(bool extended=false) const
Definition: Graph.cpp:68
armnn::SubgraphView::IInputSlots
std::vector< IInputSlot * > IInputSlots
Definition: SubgraphView.hpp:58
armnn::EdgeStrategy
EdgeStrategy
Definition: ITensorHandleFactory.hpp:104
armnn::Layer::GetGuid
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:343
armnn::ParameterStringifyFunction
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Definition: SerializeLayerParameters.hpp:14
armnn::SubgraphView
The SubgraphView class represents a subgraph of a Graph.
Definition: SubgraphView.hpp:31
armnn::Status::Success
@ Success
armnn::Layer::GetOutputSlots
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:259
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
armnn::BoostLogSeverityMapping::info
@ info
armnn::LayerType::MemImport
@ MemImport
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::Layer::GetNumInputSlots
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:334
armnn::InputSlot
Definition: Layer.hpp:42
armnn::LayerType::FullyConnected
@ FullyConnected
SubgraphView.hpp
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::Layer::GetType
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:286
armnn::Graph::AddCompatibilityLayers
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices,...
Definition: Graph.cpp:330
armnn::DotEdge::GetAttributeSet
DotAttributeSet & GetAttributeSet()
Definition: DotSerializer.hpp:83
armnn::Status
Status
Definition: Types.hpp:42
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::Layer::Clone
virtual Layer * Clone(Graph &graph) const =0
Creates a dynamically-allocated copy of this layer.
armnn::Layer::BeginOutputSlots
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:266
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::Graph::Graph
Graph(bool shapeInferenceMethod=false, bool allowExpandedDims=false)
Definition: Graph.hpp:98
armnn::Graph::TopologicalSort
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:191
armnn::SubgraphView::GetIInputSlots
const IInputSlots & GetIInputSlots() const
Definition: SubgraphView.cpp:236
armnn::EdgeStrategy::ExportToTarget
@ ExportToTarget
Destination backend can work directly with tensors on source backend.
armnn::OutputSlot::GetConnections
const std::vector< InputSlot * > & GetConnections() const
Definition: Layer.hpp:145
armnn::OutputSlot::SetEdgeStrategy
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:223
armnn::Graph::SerializeToDot
Status SerializeToDot(std::ostream &stream)
Definition: Graph.cpp:146
armnn::Layer::GetBackendId
const BackendId & GetBackendId() const
Definition: Layer.hpp:290
armnn::DotDefaults
Definition: DotSerializer.hpp:114
armnn::BackendId
Definition: BackendId.hpp:75
armnn::OutputSlot::GetTensorHandleFactoryId
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:218
armnn::InputSlot::GetConnectedOutputSlot
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::SubgraphView::ForEachIConnectableLayer
void ForEachIConnectableLayer(Func func) const
Definition: SubgraphView.hpp:46
armnn::Layer::SetBackendId
void SetBackendId(const BackendId &id) override
Set the backend of the IConnectableLayer.
Definition: Layer.hpp:291
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::DotDefaults::GetAttributeSet
DotAttributeSet & GetAttributeSet()
Definition: DotSerializer.hpp:120
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::NullPointerException
Definition: Exceptions.hpp:146
armnn::Graph
Definition: Graph.hpp:30
armnn::SubgraphView::GetIOutputSlots
const IOutputSlots & GetIOutputSlots() const
Definition: SubgraphView.cpp:241
armnn::Status::Failure
@ Failure
armnn::ITensorHandle::Allocate
virtual void Allocate()=0
Indicate to the memory manager that this resource is no longer active.
armnn::LayerType::Constant
@ Constant
ARMNN_THROW_INVALIDARG_MSG_IF_FALSE
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
Definition: Exceptions.hpp:210