ArmNN
 25.11
Loading...
Searching...
No Matches
Graph.cpp
Go to the documentation of this file.
1//
2// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include <Graph.hpp>
7#include <LayersFwd.hpp>
8
11
12#include <armnn/BackendId.hpp>
13#include <armnn/Logging.hpp>
14#include <armnn/TypesUtils.hpp>
17
18#include <fmt/format.h>
19
20#include <unordered_map>
21#include <DotSerializer.hpp>
22#include <sstream>
23
24namespace armnn
25{
26
27Graph::Graph(const Graph& other)
28: m_LayersInOrder(other.m_LayersInOrder)
29, m_AllowExpandedDims(other.m_AllowExpandedDims)
30, m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31, m_Profiler(other.m_Profiler)
32{
33 std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34
35 for (auto&& otherLayer : other.m_Layers)
36 {
37 Layer* const layer = otherLayer->Clone(*this);
38 otherToClonedMap.emplace(otherLayer, layer);
39 }
40
41 // Copies slot connections.
42 for (auto&& otherLayer : other.m_Layers)
43 {
44 Layer* const thisLayer = otherToClonedMap[otherLayer];
45
46 auto outputSlot = thisLayer->BeginOutputSlots();
47 for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48 {
49 for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50 {
51 const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52 Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53
54 InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55
56 if (otherInputSlot->IsTensorInfoOverridden())
57 {
58 inputSlot.SetTensorInfo(otherInputSlot->GetTensorInfo());
59 }
60 outputSlot->Connect(inputSlot);
61 }
62 outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
63 ++outputSlot;
64 }
65 }
66}
67
68Status Graph::Print(bool extended) const
69{
70 if (m_Layers.empty())
71 {
72 ARMNN_LOG(info) << "\n Graph is empty.\n";
73 return Status::Success;
74 }
75 ARMNN_LOG(info) << "\n";
76 ARMNN_LOG(info) << "Walking Pattern: \n";
77
78 for (auto&& it : TopologicalSort())
79 {
80 auto numInputSlots = it->GetNumInputSlots();
81 auto numOutputSlots = it->GetNumOutputSlots();
82
83 std::string guid;
84 if (extended)
85 {
86 guid += ":";
87 guid += std::to_string(it->GetGuid());
88 }
89 ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
90 << ":" << it->GetBackendId().Get()
91 << guid
92 << " has " << numInputSlots << " input slots"
93 << " and " << numOutputSlots << " output slots.";
94
95 for (auto i : it->GetInputSlots())
96 {
97 std::ostringstream message;
98 auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
99 unsigned int numDims = inputTensorShape.GetNumDimensions();
100
101 message << "The input slot has shape [ ";
102 for (unsigned int dim=0; dim < numDims; dim++)
103 {
104 message << inputTensorShape[dim] << ",";
105 }
106 message << " ]";
107 if (extended)
108 {
109 message << " Scale: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationScale();
110 message << " Offset: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationOffset();
111 message << " The input slot is connected to: ";
112 message << i.GetConnectedOutputSlot()->GetOwningIConnectableLayer().GetGuid();
113 }
114 ARMNN_LOG(info) << message.str();
115 }
116
117 for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
118 {
119 const armnn::Layer *layer = it;
120 std::ostringstream message;
121 auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
122 unsigned int numDims = outputTensorShape.GetNumDimensions();
123
124 message << "The output slot has shape [ ";
125 for (unsigned int dim=0; dim < numDims; dim++)
126 {
127 message << outputTensorShape[dim] << ",";
128 }
129 message << " ]";
130 if (extended)
131 {
132 message << " Scale: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationScale();
133 message << " Offset: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationOffset();
134 message << " The output slot is connected to: ";
135 message << layer->GetOutputSlots()[i].GetConnection(0)->GetOwningIConnectableLayer().GetGuid();
136 }
137 ARMNN_LOG(info) << message.str();
138 }
139 ARMNN_LOG(info) << "\n";
140 }
141 ARMNN_LOG(info) << "\n\n";
142
143 return Status::Success;
144}
145
146Status Graph::SerializeToDot(std::ostream& stream)
147{
148 {
149 DotGraph graph(stream, "Optimized");
150
151 {
152 // Default node attributes:
153 DotDefaults nodes(stream, "node");
154 nodes.GetAttributeSet()
155 .AddAttribute("shape", "record");
156 }
157
158 {
159 // Default edge attributes:
160 DotDefaults edges(stream, "edge");
161 edges.GetAttributeSet()
162 .AddAttribute("fontsize", 8)
163 .AddAttribute("fontcolor", "blue")
164 .AddAttribute("fontname", "arial-bold");
165 }
166
167 // First declares the nodes.
168 for (auto&& layer : m_Layers)
169 {
170 DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
171 // Extracts the layer parameters.
172 ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
173 node.GetContents().AddContent(name + " : " + value);
174 };
175 layer->SerializeLayerParameters(extractParams);
176 }
177
178 // Second declares the edges.
179 for (auto&& layer : m_Layers)
180 {
181 LayerGuid toId = layer->GetGuid();
182
183 for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
184 {
185 OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
186 LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
187 DotEdge edge(stream, fromId, toId);
188
189 // Now print the tensor shape on the edge.
190 {
191 // Constructs the label attribute with HTML markup.
192 std::stringstream ss;
193 ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
194 edge.GetAttributeSet().AddAttribute("label", ss);
195 }
196 }
197 }
198 }
199
200 if (stream.bad())
201 {
202 return Status::Failure;
203 }
204 return Status::Success;
205}
206
208{
209 // Layers must be sorted in topological order
210 ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_LayersInOrder, "layers must be in order.");
211
212 ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
213
214 std::unordered_set<const ITensorHandle*> preallocatedTensors;
215 std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
216
217 // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
218 // is a TensorHandle, the function just returns it
219 auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
220 {
221 ITensorHandle* ancestor = subTensorHandle;
222 while (ancestor && ancestor->GetParent())
223 {
224 ancestor = ancestor->GetParent();
225 }
226 return ancestor;
227 };
228
229 // Checks whether a TensorHandle has been pre-allocated
230 auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
231 {
232 return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
233 };
234
235 // Constant tensor handles need to last from the beginning of execution till the end,
236 // therefore we pre-allocate them upfront
237 for (auto&& layer : m_Layers)
238 {
239 if (layer->GetType() == LayerType::Constant)
240 {
241 for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
242 {
243 ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
244
245 if (tensorHandle && !IsPreallocated(tensorHandle))
246 {
247 tensorHandle->Allocate();
248 preallocatedTensors.insert(tensorHandle);
249 }
250 }
251 }
252 }
253
254 // Iterate over the network in topological order
255 for (auto&& layer : m_Layers)
256 {
257 // Count the amount of times each output slot references a certain buffer (ITensorHandle).
258 // The first time we encounter a new tensor handle, we start managing its lifetime.
259 for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
260 {
261 ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
262
263 if (tensorHandle && !IsPreallocated(tensorHandle))
264 {
265 unsigned int numConnections = slot->GetNumConnections();
266 if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
267 {
268 handleReferenceCounts[tensorHandle] = numConnections;
269 tensorHandle->Manage();
270 if (handleReferenceCounts[tensorHandle] == 0u)
271 {
272 // if nobody consumes this tensor we call Allocate()
273 tensorHandle->Allocate();
274 }
275 }
276 else
277 {
278 handleReferenceCounts[tensorHandle] += numConnections;
279 }
280 }
281 }
282
283 // Loop through the input slots in the same layer and decrement the reference counter associated
284 // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
285 for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
286 {
287 ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
288 slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
289
290 if (tensorHandle && !IsPreallocated(tensorHandle))
291 {
292 --handleReferenceCounts[tensorHandle];
293
294 if (handleReferenceCounts[tensorHandle] == 0u)
295 {
296 // Stop managing lifetime of tensor handle
297 tensorHandle->Allocate();
298 handleReferenceCounts.erase(tensorHandle);
299 }
300 }
301 }
302 }
303
304 return Status::Success;
305}
306
308{
309 if (!m_LayersInOrder)
310 {
311 // Resets layer order.
312 for (auto&& it : m_Layers)
313 {
314 it->ResetPriority();
315 }
316
317 auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
318 {
319 return layerA->GetPriority() < layerB->GetPriority();
320 };
321
322 m_Layers.sort(compareLayerPriority);
323
324 m_LayersInOrder = true;
325 }
326
327 return *this;
328}
329
330void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendInternal>>& backends,
332{
333 // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
334 // connections to other layers).
335 auto MayNeedCompatibilityLayer = [](const Layer& layer)
336 {
337 // All layers should have been associated with a valid compute device at this point.
338 if (layer.GetBackendId() == Compute::Undefined)
339 {
340 throw armnn::Exception("AddCompatibilityLayers: All layers must be assigned to a backend at this point.");
341 }
342 // Does not need another compatibility layer if a copy or import layer is already present.
343 return layer.GetType() != LayerType::MemCopy &&
344 layer.GetType() != LayerType::MemImport;
345 };
346
347 auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
348 {
349 return strategy == EdgeStrategy::CopyToTarget ||
351 };
352
353 ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
354 {
355 ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(srcLayer, "source layer must not be null.");
356
357 if (!MayNeedCompatibilityLayer(*srcLayer))
358 {
359 // The current layer does not need copy layers, move to the next one
360 return;
361 }
362
363 const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
364 for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
365 {
366 OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
367 const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
368 const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
369 for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
370 {
371 InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
372 if (!dstInputSlot)
373 {
374 throw armnn::Exception("dstInputSlot must not be null.");
375 }
376
377 EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
378 if (strategy == EdgeStrategy::Undefined)
379 {
380 throw armnn::Exception("Undefined memory strategy found "
381 "while adding copy layers for compatibility");
382 }
383
384 const Layer& dstLayer = dstInputSlot->GetOwningLayer();
385 if (MayNeedCompatibilityLayer(dstLayer) &&
386 IsCompatibilityStrategy(strategy))
387 {
388 // A copy layer is needed in between the source and destination layers.
389 // Record the operation rather than attempting to modify the graph as we go.
390 // (invalidating iterators)
391 const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
392 srcLayer->GetName(),
393 srcOutputIndex,
394 dstLayer.GetName(),
395 dstInputSlot->GetSlotIndex());
396 Layer* compLayer = nullptr;
397 if (strategy == EdgeStrategy::CopyToTarget)
398 {
399 compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
400 }
401 else
402 {
403 if (strategy != EdgeStrategy::ExportToTarget)
404 {
405 throw armnn::Exception("Invalid edge strategy found.");
406 }
407
408 compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
409 }
410
411 compLayer->SetBackendId(dstLayer.GetBackendId());
412
413 OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
414 auto backendIt = backends.find(dstLayer.GetBackendId());
415 if (backendIt != backends.end() &&
416 backendIt->second &&
417 backendIt->second->SupportsTensorAllocatorAPI())
418 {
419 auto backend = backendIt->second.get();
420 auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
421 bool found = false;
422
423 for (auto preference : tensorHandleFactoryIds)
424 {
425 auto factory = registry.GetFactory(preference);
426 if (factory)
427 {
428 auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
429 auto srcFactory = registry.GetFactory(srcPref);
430
431 if (srcFactory)
432 {
433 bool canExportImport =
434 (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
435
436 if (factory->SupportsMapUnmap() || canExportImport)
437 {
438 compOutputSlot.SetTensorHandleFactory(preference);
439 found = true;
440 break;
441 }
442 }
443 }
444 }
445
446 if (!found)
447 {
449 }
450 }
451 else
452 {
454 }
455
456 // The output strategy of a compatibility layer is always DirectCompatibility.
458
459 // Recalculate the connection index on the previous layer as we have just inserted into it.
460 const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
461 auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
462 std::find(newSourceConnections.begin(),
463 newSourceConnections.end(),
464 &compLayer->GetInputSlot(0)));
465
466 // The input strategy of a compatibility layer is always DirectCompatibilty.
467 srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
469 }
470 }
471 }
472 });
473}
474
476{
477 ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(substituteLayer, "substituteLayer should not be null");
478
479 // Create a new sub-graph with only the given layer, using
480 // the given sub-graph as a reference of which parent graph to use
481 SubgraphView substituteSubgraph(substituteLayer);
482
483 SubstituteSubgraph(subgraph, substituteSubgraph);
484}
485
486void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
487{
488 // Look through each layer in the new subgraph and add any that are not already a member of this graph
489 substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
490 {
491 if (std::find(std::begin(m_Layers),
492 std::end(m_Layers),
493 iConnectableLayer) == std::end(m_Layers))
494 {
495 auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
496 layer->Reparent(*this, m_Layers.end());
497 m_LayersInOrder = false;
498 }
499 });
500
501 ReplaceSubgraphConnections(subgraph, substituteSubgraph);
502 EraseSubgraphLayers(subgraph);
504}
505
/// Rewires the graph so the substitute sub-graph takes the place of the original:
/// every connection feeding the old sub-graph's input slots is moved onto the
/// substitute's corresponding input slot (preserving any TensorInfo override), and
/// every connection leaving the old sub-graph's output slots is moved onto the
/// substitute's corresponding output slot. Layers themselves are not erased here.
/// @param subgraph            the sub-graph being replaced.
/// @param substituteSubgraph  the replacement; must be non-empty, all of its layers
///                            must already belong to this graph, and its input/output
///                            slot counts must match the replaced sub-graph's.
/// @throws Exception / NullPointerException on any violated precondition.
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
    if (substituteSubgraph.GetIConnectableLayers().empty())
    {
        throw armnn::Exception("New sub-graph used for substitution must not be empty");
    }

    // Precondition: every substitute layer is already a member of this graph
    // (SubstituteSubgraph reparents foreign layers before calling this function).
    const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
    std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
    {
        layer = PolymorphicDowncast<Layer*>(layer);
        if (std::find(m_Layers.begin(), m_Layers.end(), layer) == m_Layers.end())
        {
            throw armnn::Exception("Substitute layer is not a member of graph");
        }
    });

    const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraph.GetIOutputSlots();

    unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
    unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());

    const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();

    // Boundary slots must correspond one-to-one between the two sub-graphs.
    if (subgraphNumInputSlots != substituteSubgraphInputSlots.size())
    {
        throw armnn::Exception("subgraph and substitute subgraph input slot sizes must be the same.");
    }

    if (subgraphNumOutputSlots != substituteSubgraphOutputSlots.size())
    {
        throw armnn::Exception("subgraph and substitute subgraph output slot sizes must be the same.");
    }

    // Disconnect the sub-graph and replace it with the substitute sub-graph

    // Step 1: process input slots
    for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
    {
        IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
        if (!subgraphInputSlot)
        {
            throw armnn::NullPointerException("subgraphInputSlot must not be null.");
        }

        // Only disconnect if the InputSlot has a connection, this might not be the case when
        // dealing with working copies of SubgraphViews
        // Note: we don't need this check for OutputSlot as it iterates over a vector of valid connections
        if (subgraphInputSlot->GetConnection())
        {
            IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
            InputSlot* inputSlot = PolymorphicDowncast<InputSlot*>(subgraphInputSlot);
            // Capture the override flag before the slot is disconnected.
            bool isOverridden = inputSlot->IsTensorInfoOverridden();

            if (!connectedOutputSlot)
            {
                throw armnn::NullPointerException("connectedOutputSlot must not be null.");
            }

            connectedOutputSlot->Disconnect(*subgraphInputSlot);

            IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
            if (!substituteInputSlot)
            {
                throw armnn::NullPointerException("substituteInputSlot must not be null.");
            }

            connectedOutputSlot->Connect(*substituteInputSlot);

            // Carry an overridden TensorInfo across to the replacement input slot.
            if (isOverridden)
            {
                TensorInfo overridden = inputSlot->GetTensorInfo();
                InputSlot* newInputSlot = PolymorphicDowncast<InputSlot*>(substituteInputSlot);
                newInputSlot->SetTensorInfo(overridden);
            }
        }
    }

    // Step 2: process output slots
    for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
    {
        auto subgraphOutputSlot =
            PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
        if (!subgraphOutputSlot)
        {
            throw armnn::NullPointerException("subgraphOutputSlot must not be null.");
        }

        auto substituteOutputSlot =
            PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
        if (!substituteOutputSlot)
        {
            throw armnn::NullPointerException("substituteOutputSlot must not be null.");
        }

        subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
    }
}
606
607void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
608{
609
610 for (auto iConnectableLayer : subgraph.GetIConnectableLayers())
611 {
612 auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
613 EraseLayer(layer);
614 }
615 subgraph.Clear();
616}
617
618/// For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
619/// LayerValidationException thrown if no TensorInfo is set.
620///
621/// @throws LayerValidationException
623{
624 for (auto&& layer : TopologicalSort())
625 {
626 if (layer->GetType() == armnn::LayerType::Constant)
627 {
628 for (auto&& output: layer->GetOutputSlots())
629 {
630 if (!output.IsTensorInfoSet())
631 {
632 std::ostringstream message;
633 message << "Output slot TensorInfo not set on "
634 << GetLayerTypeAsCString(layer->GetType())
635 << " layer \""
636 << layer->GetName()
637 << "\"";
638 throw LayerValidationException(message.str());
639 }
640 }
641 }
642 }
643}
644
646{
647 for (auto&& layer : TopologicalSort())
648 {
649 for (auto&& input : layer->GetInputSlots())
650 {
651 const IOutputSlot* source = input.GetConnectedOutputSlot();
652 if (source == NULL)
653 {
654 // Throws exception due to a layer input not being connected to an output slot.
655 // Verifies input slot weights and bias are set for FullyConnected layers.
656 ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
657 }
658
659 if (!source->IsTensorInfoSet())
660 {
661 std::ostringstream message;
662 message << "Output slot TensorInfo not set on "
663 << GetLayerTypeAsCString(layer->GetType())
664 << " layer "
665 << std::quoted(layer->GetName());
666 throw LayerValidationException(message.str());
667 }
668 }
669
670 if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
671 {
672 layer->ValidateTensorShapesFromInputs();
673 }
674 }
675}
676
677/// Throws exception due to a layer input not being connected to an output slot.
678/// Verifies weights and bias are set for layers on input slots 1
679/// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
680///
681/// @param layer constant pointer to a Layer object
682/// @param slotIndex input slot index of layer
683/// @throws LayerValidationException
684void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
685 unsigned int slotIndex)
686{
687 std::ostringstream message;
688 bool noWeightsAndBias = false;
689
690 if ((layer->GetType() == armnn::LayerType::FullyConnected ||
693 layer->GetType() == armnn::LayerType::DepthwiseConvolution2d) && slotIndex > 0)
694 {
695 message << std::endl;
696
697 // If weights are not set and is bias enabled, also check if bias is set
698 if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
699 {
700 const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
701 if (biasSource == NULL)
702 {
703 message << "Weights and bias layers not set." << std::endl;
704 noWeightsAndBias = true;
705 }
706 }
707
708 // Only weights or bias are not set
709 if (!noWeightsAndBias)
710 {
711 if (slotIndex == 1)
712 {
713 message << "Weights layer not set." << std::endl;
714 }
715 else
716 {
717 message << "Bias layer not set." << std::endl;
718 }
719 }
720 }
721
722 std::string slotString = noWeightsAndBias ? "1 & 2" : std::to_string(slotIndex);
723 message << "Input slot(s) "
724 << slotString
725 << " for "
726 << GetLayerTypeAsCString(layer->GetType())
727 << " not connected to an output slot. " << std::endl
728 << "Layer name: "
729 << std::quoted(layer->GetName());
730 throw LayerValidationException(message.str());
731}
732
/// Returns the profiler instance shared by this graph.
const std::shared_ptr<IProfiler>& Graph::GetProfiler() const
{
    return m_Profiler;
}
737
739{
740 m_LayersInOrder = false;
741}
742
743} // namespace armnn
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
#define ARMNN_LOG(severity)
Definition Logging.hpp:212
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition Types.hpp:26
DotAttributeSet & AddAttribute(const std::string &name, const std::stringstream &value)
DotAttributeSet & GetAttributeSet()
DotAttributeSet & GetAttributeSet()
NodeContent & GetContents()
Base class for all ArmNN exceptions so that users can filter to just those.
friend class SubgraphView
Definition Graph.hpp:307
Status SerializeToDot(std::ostream &stream)
Definition Graph.cpp:146
void InferTensorInfos()
Definition Graph.cpp:645
LayerT * InsertNewLayer(InputSlot &insertBefore, Args &&... args)
Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itse...
Definition Graph.hpp:481
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handers of each layer.
Definition Graph.cpp:207
Status Print(bool extended=false) const
Definition Graph.cpp:68
void VerifyConstantLayerSetTensorInfo() const
For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
Definition Graph.cpp:622
const std::shared_ptr< IProfiler > & GetProfiler() const
Definition Graph.cpp:733
Graph(bool shapeInferenceMethod=false, bool allowExpandedDims=false)
Definition Graph.hpp:98
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition Graph.cpp:475
void SetLayersOutOfOrder()
Definition Graph.cpp:738
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition Graph.hpp:191
void ForEachLayer(Func func) const
Definition Graph.hpp:40
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal > > &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices,...
Definition Graph.cpp:330
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition INetwork.hpp:81
An output connection slot for a layer.
Definition INetwork.hpp:54
virtual bool IsTensorInfoSet() const =0
static const FactoryId LegacyFactoryId
virtual void Manage()=0
Indicate to the memory manager that this resource is active.
virtual ITensorHandle * GetParent() const =0
Get the parent tensor if this is a subtensor.
virtual void Allocate()=0
Indicate to the memory manager that this resource is no longer active.
Layer & GetOwningLayer() const
Definition Layer.hpp:53
const OutputSlot * GetConnectedOutputSlot() const
Definition Layer.hpp:56
unsigned int GetSlotIndex() const override
Definition Layer.hpp:54
const std::vector< OutputSlot > & GetOutputSlots() const
Definition Layer.hpp:259
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition Layer.hpp:337
void SetBackendId(const BackendId &id) override
Set the backend of the IConnectableLayer.
Definition Layer.hpp:291
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition Layer.hpp:339
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition Layer.hpp:343
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition Layer.hpp:334
const char * GetName() const override
Returns the name of the layer.
Definition Layer.hpp:332
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition Layer.hpp:286
const BackendId & GetBackendId() const
Definition Layer.hpp:290
NodeContent & AddContent(const std::string &content)
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition Layer.cpp:223
Layer & GetOwningLayer() const
Definition Layer.hpp:132
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition Layer.hpp:146
const std::vector< InputSlot * > & GetConnections() const
Definition Layer.hpp:145
const TensorInfo & GetTensorInfo() const override
Definition Layer.cpp:100
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition Layer.cpp:213
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition Layer.cpp:218
The SubgraphView class represents a subgraph of a Graph.
const IConnectableLayers & GetIConnectableLayers() const
void ForEachIConnectableLayer(Func func) const
const IInputSlots & GetIInputSlots() const
const IOutputSlots & GetIOutputSlots() const
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
const TensorShape & GetShape() const
Definition Tensor.hpp:193
Copyright (c) 2021 ARM Limited and Contributors.
Status
enumeration
Definition Types.hpp:43
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
const char * GetLayerTypeAsCString(LayerType type)
@ ValidateOnly
Validate all output shapes.
Definition Types.hpp:239
@ ExportToTarget
Destination backend can work directly with tensors on source backend.
@ DirectCompatibility
No strategy has been defined. Used internally to verify integrity of optimizations.
@ CopyToTarget
Source backends tensor data can be exported to destination backend tensor without copy.