ArmNN
 25.11
Loading...
Searching...
No Matches
Layer.hpp
Go to the documentation of this file.
1//
2// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5#pragma once
6
7#include "LayerFwd.hpp"
8
10#include <OutputHandler.hpp>
14#include "InternalTypes.hpp"
16#include "DllExport.hpp"
17
18#include <armnn/Types.hpp>
19#include <armnn/Tensor.hpp>
20#include <armnn/INetwork.hpp>
24
25#include <algorithm>
26#include <functional>
27#include <iostream>
28#include <list>
29#include <memory>
30#include <string>
31#include <vector>
33
34namespace armnn
35{
36
// Forward declarations (IWorkloadFactory is needed by Layer::CreateWorkload below).
class IWorkload;
class IWorkloadFactory;
class Layer;
class Graph;
41
42class InputSlot final : public IInputSlot
43{
44public:
45 explicit InputSlot(Layer& owner, unsigned int slotIndex)
46 : m_OwningLayer(owner)
47 , m_Connection(nullptr)
48 , m_SlotIndex(slotIndex)
49 {}
50
51 ~InputSlot();
52
53 Layer& GetOwningLayer() const { return m_OwningLayer; }
54 unsigned int GetSlotIndex() const override { return m_SlotIndex; }
55
56 const OutputSlot* GetConnectedOutputSlot() const { return m_Connection; }
57 OutputSlot* GetConnectedOutputSlot() { return m_Connection; }
58
59 const IConnectableLayer& GetOwningIConnectableLayer() const override;
61
62 /// Links the slot to an output slot or breaks an existing link if passing nullptr.
64 {
65 if (m_Connection != nullptr && source != nullptr)
66 {
67 throw InvalidArgumentException("Tried to connect an output slot to an input slot, "
68 "but the latter already has a connection");
69 }
70 m_Connection = source;
71 }
72
73 // Inserts single-output existing layer at this point in the graph.
74 void Insert(Layer& layer);
75
76 // InputSlot
77
78 const IOutputSlot* GetConnection() const override;
79 IOutputSlot* GetConnection() override;
80
81 /// Sets the TensorInfo for this InputSlot. This can be used to override the TensorInfo and if set will be returned
82 /// instead of the TensorInfo for the Connected OutputSlot.
83 void SetTensorInfo(const TensorInfo tensorInfo) override;
84 /// Gets the TensorInfo for this InputSlot. If the InputSlot's TensorInfo has not been set then this will get the
85 /// TensorInfo from the Connected TensorInfo.
86 const TensorInfo& GetTensorInfo() const override;
87 /// Returns true if this InputSlot either has an overridden TensorInfo for this InputSlot that was set through a
88 /// call to SetTensorInfo() or is Connected to an OutputSlot that has its TensorInfo set.
89 bool IsTensorInfoSet() const override;
90 /// Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorInfo().
91 bool IsTensorInfoOverridden() const override;
92
93private:
94 Layer& m_OwningLayer;
95 OutputSlot* m_Connection;
96 Optional<TensorInfo> m_OverriddenTensorInfo;
97 const unsigned int m_SlotIndex;
98};
99
100class OutputSlot final : public IOutputSlot
101{
102public:
103 explicit OutputSlot(Layer& owner, OutputHandler& outputHandler)
104 : m_OwningLayer(owner)
105 , m_OutputHandler(outputHandler)
106 , m_TensorHandleFactoryId(ITensorHandleFactory::LegacyFactoryId)
107 {}
108
109 OutputSlot(const OutputSlot&) = delete;
110 OutputSlot& operator=(const OutputSlot&) = delete;
112
113 OutputSlot(OutputSlot&&) = default;
114
116 {
117 try
118 {
119 // Coverity fix: DisconnectAll() may throw uncaught exceptions.
121 }
122 catch (const std::exception& e)
123 {
124 // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
125 // exception of type std::length_error.
126 // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
127 std::cerr << "WARNING: An error has occurred when disconnecting all output slots: "
128 << e.what() << std::endl;
129 }
130 }
131
132 Layer& GetOwningLayer() const { return m_OwningLayer; }
133
134 const IConnectableLayer& GetOwningIConnectableLayer() const override;
136
137 LayerGuid GetOwningLayerGuid() const override;
138
139 const OutputHandler& GetOutputHandler() const { return m_OutputHandler; }
140 OutputHandler& GetOutputHandler() { return m_OutputHandler; }
141
142 int Connect(InputSlot& destination);
143 void Disconnect(InputSlot& slot);
144
145 const std::vector<InputSlot*>& GetConnections() const { return m_Connections; }
146 const std::vector<EdgeStrategy>& GetEdgeStrategies() const { return m_EdgeStrategies; }
147
148 bool ValidateTensorShape(const TensorShape& shape) const;
149
150 // Disconnect all conections.
151 void DisconnectAll();
152
153 /// Moves all connections to another OutputSlot.
154 void MoveAllConnections(OutputSlot& destination);
155
156 // IOutputSlot
157
158 unsigned int GetNumConnections() const override { return armnn::numeric_cast<unsigned int>(m_Connections.size()); }
159 const InputSlot* GetConnection(unsigned int index) const override;
160 InputSlot* GetConnection(unsigned int index) override;
161
162 void SetTensorInfo(const TensorInfo& tensorInfo) override;
163 const TensorInfo& GetTensorInfo() const override;
164 bool IsTensorInfoSet() const override;
165
166 int Connect(IInputSlot& destination) override
167 {
168 return Connect(*PolymorphicDowncast<InputSlot*>(&destination));
169 }
170
171 void Disconnect(IInputSlot& slot) override
172 {
174 }
175
176 unsigned int CalculateIndexOnOwner() const override;
177
178 bool operator==(const OutputSlot& other) const;
179
182
183 void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy);
184 EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const;
185
186private:
187 void ValidateConnectionIndex(unsigned int index) const;
188
189 Layer& m_OwningLayer;
190 OutputHandler& m_OutputHandler;
191 std::vector<InputSlot*> m_Connections;
192
193 ITensorHandleFactory::FactoryId m_TensorHandleFactoryId;
194 std::vector<EdgeStrategy> m_EdgeStrategies;
195};
196
197// InputSlot inlines that need OutputSlot declaration.
198
200{
201 if (m_Connection != nullptr)
202 {
203 try
204 {
205 // Coverity fix: Disconnect() may throw uncaught exceptions.
206 m_Connection->Disconnect(*this);
207 }
208 catch (const std::exception& e)
209 {
210 // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
211 // exception of type std::length_error.
212 // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
213 std::cerr << "WARNING: An error has occurred when disconnecting an input slot: "
214 << e.what() << std::endl;
215 }
216 }
217}
218
221
222
224
// Base layer class

/// Priority value used when sorting layers (see Layer::ResetPriority / GetPriority).
using LayerPriority = unsigned int;
/// Opaque, shared per-layer "blob" of additional information
/// (see Layer::SetAdditionalInfoForObject / GetAdditionalInformation).
using AdditionalInfoObjectPtr = std::shared_ptr<void>;
229
231{
232public:
233 /// @param name - Optional name for the layer (may be nullptr).
234 Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
235 Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char* name);
236
237 void ExecuteStrategy(IStrategy& strategy) const override;
238
239
240 const std::string& GetNameStr() const
241 {
242 return m_LayerName;
243 }
244
245 const OutputHandler& GetOutputHandler(unsigned int i = 0) const
246 {
247 return m_OutputHandlers[i];
248 }
249
250 OutputHandler& GetOutputHandler(unsigned int i = 0)
251 {
252 return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
253 }
254
256 bool GetAllowExpandedDims() const { return m_AllowExpandedDims; };
257
258 const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
259 const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }
260
261 // Allows non-const access to input slots, but don't expose vector (vector size is fixed at layer construction).
262 std::vector<InputSlot>::iterator BeginInputSlots() { return m_InputSlots.begin(); }
263 std::vector<InputSlot>::iterator EndInputSlots() { return m_InputSlots.end(); }
264
265 // Allows non-const access to output slots, but don't expose vector (vector size is fixed at layer construction).
266 std::vector<OutputSlot>::iterator BeginOutputSlots() { return m_OutputSlots.begin(); }
267 std::vector<OutputSlot>::iterator EndOutputSlots() { return m_OutputSlots.end(); }
268
269 // Checks whether the outputs of this layer don't have any connection.
271 {
272 unsigned int numConnections = 0;
273
274 for (auto&& output : GetOutputSlots())
275 {
276 numConnections += output.GetNumConnections();
277 }
278
279 return (GetNumOutputSlots() > 0) && (numConnections == 0);
280 }
281
282 // Used for sorting.
283 void ResetPriority() const;
285
286 LayerType GetType() const override { return m_Type; }
287
288 DataType GetDataType() const;
289
290 const BackendId& GetBackendId() const { return m_BackendId; }
291 void SetBackendId(const BackendId& id) override { m_BackendId = id; }
292
293 // Virtuals
294
295 virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const = 0;
296
297 virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
298 const IWorkloadFactory& factory,
299 const bool IsMemoryManaged = true);
300
301 /// Creates a dynamically-allocated copy of this layer.
302 /// @param graph - The Graph into which this Layer is being cloned.
303 virtual Layer* Clone(Graph& graph) const = 0;
304
305 void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const;
306
308
309 std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
310
311 /// Helper to serialize the layer parameters to string.
312 /// (currently used in DotSerializer and company).
314
315 // Free up the constant source data
316 virtual void ReleaseConstantData();
317
318 template<typename Op>
320 {
321 for (auto constant : GetConstantTensorsByRef())
322 {
323 if (constant.get())
324 {
325 op(constant);
326 }
327 }
328 };
329
330 // IConnectableLayer
331
332 const char* GetName() const override { return m_LayerName.c_str(); }
333
334 unsigned int GetNumInputSlots() const override { return static_cast<unsigned int>(m_InputSlots.size()); }
335 unsigned int GetNumOutputSlots() const override { return static_cast<unsigned int>(m_OutputSlots.size()); }
336
337 const InputSlot& GetInputSlot(unsigned int index) const override { return m_InputSlots.at(index); }
338 InputSlot& GetInputSlot(unsigned int index) override { return m_InputSlots.at(index); }
339 const OutputSlot& GetOutputSlot(unsigned int index = 0) const override { return m_OutputSlots.at(index); }
340 OutputSlot& GetOutputSlot(unsigned int index = 0) override { return m_OutputSlots.at(index); }
341
342 void SetGuid(LayerGuid guid) { m_Guid = guid; }
343 LayerGuid GetGuid() const final { return m_Guid; }
344
345 void AddRelatedLayerName(const std::string layerName) { m_RelatedLayerNames.emplace_back(layerName); }
346
347 const std::list<std::string>& GetRelatedLayerNames() { return m_RelatedLayerNames; }
348
349 virtual void Reparent(Graph& dest, std::list<Layer*>::const_iterator iterator) = 0;
350
352 {
353 m_BackendHint = backend;
354 }
355 Optional<BackendId> GetBackendHint() const { return m_BackendHint; }
356
358 {
359 m_ShapeInferenceMethod = shapeInferenceMethod;
360 }
361
362 void SetAllowExpandedDims(bool allowExpandedDims)
363 {
364 m_AllowExpandedDims = allowExpandedDims;
365 }
366
367 template<typename T>
368 std::shared_ptr<T> GetAdditionalInformation() const
369 {
370 return std::static_pointer_cast<T>(m_AdditionalInfoObject);
371 }
372
374 {
375 m_AdditionalInfoObject = additionalInfo;
376 }
377
378 virtual const BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
379
380protected:
381 // Graph needs access to the virtual destructor.
382 friend class Graph;
383 virtual ~Layer() = default;
384
385 template <typename QueueDescriptor>
387 {
388 WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
389 CollectWorkloadInputs(dataCollector);
390 }
391
392 template <typename QueueDescriptor>
394 {
395 WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
396 CollectWorkloadOutputs(dataCollector);
397 }
398
399 void ValidateAndCopyShape(const TensorShape& outputShape,
400 const TensorShape& inferredShape,
401 const ShapeInferenceMethod shapeInferenceMethod,
402 const std::string& layerName,
403 const unsigned int outputSlotIndex = 0);
404
405 void VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod);
406
407 /// Helper function to reduce duplication in *Layer::CreateWorkload.
408 template <typename QueueDescriptor>
410 {
414 info.m_Name = GetName();
415 return info;
416 }
417
418 template <typename LayerType, typename ... Params>
419 LayerType* CloneBase(Graph& graph, Params&& ... params) const;
420
421 // Retrieve the Handles to the constants
422 // Marking this as override and having this here keeps IConnectable abstract with only pure virtual function
423 virtual ConstantTensors GetConstantTensorsByRef() override final;
424
425 // Retrieve the Handles to the constants
426 // Marking this as override and having this here keeps IConnectable abstract with only pure virtual function
428
429 // "Blob"
431
432 // Utility method to set a pointer in the queueDescriptor to the "blob" location in the layer
433 void SetAdditionalInfo(QueueDescriptor& descriptor) const;
434
435private:
436 void CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const;
437 void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const;
438
439protected:
440 std::vector<OutputHandler> m_OutputHandlers;
442
443private:
444 const std::string m_LayerName;
445
446 std::vector<InputSlot> m_InputSlots;
447 std::vector<OutputSlot> m_OutputSlots;
448
449 const LayerType m_Type;
450 BackendId m_BackendId;
451 Optional<BackendId> m_BackendHint;
452
453 /// Used for sorting.
454 mutable LayerPriority m_Priority = 0;
455 mutable bool m_Visiting = false;
456
457 bool m_AllowExpandedDims = false;
458
459 LayerGuid m_Guid;
460
461 std::list<std::string> m_RelatedLayerNames;
462
463 /// returned by layers which have no parameters associated with them.
464 /// has to be a member as it is returned as a const reference
465 /// declared static so that there is only ever one of them in memory
466 ARMNN_DLLEXPORT static NullDescriptor m_NullDescriptor;
467};
468
469// A layer user-provided data can be bound to (e.g. inputs, outputs).
470class BindableLayer : public Layer
471{
472public:
473 BindableLayer(unsigned int numInputSlots,
474 unsigned int numOutputSlots,
475 LayerType type,
476 const char* name,
478 : Layer(numInputSlots, numOutputSlots, type, name)
479 , m_Id(id)
480 {
481 }
482
483 LayerBindingId GetBindingId() const { return m_Id; };
484
485 void ExecuteStrategy(IStrategy& strategy) const override
486 {
487 strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName(), GetBindingId());
488 }
489
490protected:
491 ~BindableLayer() = default;
492
493private:
494 LayerBindingId m_Id;
495};
496
497} //namespace armnn
#define ARMNN_DLLEXPORT
Definition DllExport.hpp:17
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition Types.hpp:26
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition Layer.hpp:485
LayerBindingId GetBindingId() const
Definition Layer.hpp:483
BindableLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name, LayerBindingId id)
Definition Layer.hpp:473
~BindableLayer()=default
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition INetwork.hpp:81
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > > > ConstantTensors
Definition INetwork.hpp:136
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > > > ImmutableConstantTensors
Definition INetwork.hpp:141
An input connection slot for a layer.
Definition INetwork.hpp:26
An output connection slot for a layer.
Definition INetwork.hpp:54
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
Workload interface to enqueue a layer computation.
Definition IWorkload.hpp:14
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition Layer.cpp:599
bool IsTensorInfoOverridden() const override
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
Definition Layer.cpp:631
const IOutputSlot * GetConnection() const override
Definition Layer.hpp:219
OutputSlot * GetConnectedOutputSlot()
Definition Layer.hpp:57
void Insert(Layer &layer)
Definition Layer.cpp:53
void SetTensorInfo(const TensorInfo tensorInfo) override
Sets the TensorInfo for this InputSlot.
Definition Layer.cpp:609
InputSlot(Layer &owner, unsigned int slotIndex)
Definition Layer.hpp:45
Layer & GetOwningLayer() const
Definition Layer.hpp:53
const OutputSlot * GetConnectedOutputSlot() const
Definition Layer.hpp:56
bool IsTensorInfoSet() const override
Returns true if this InputSlot either has an overridden TensorInfo for this InputSlot that was set th...
Definition Layer.cpp:626
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition Layer.cpp:614
void SetConnection(OutputSlot *source)
Links the slot to an output slot or breaks an existing link if passing nullptr.
Definition Layer.hpp:63
unsigned int GetSlotIndex() const override
Definition Layer.hpp:54
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition Layer.cpp:410
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const =0
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition Layer.hpp:335
const std::vector< OutputSlot > & GetOutputSlots() const
Definition Layer.hpp:259
bool IsOutputUnconnected()
Definition Layer.hpp:270
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition Layer.hpp:337
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
Definition Layer.hpp:409
void SetBackendId(const BackendId &id) override
Set the backend of the IConnectableLayer.
Definition Layer.hpp:291
void SetAllowExpandedDims(bool allowExpandedDims)
Definition Layer.hpp:362
void AddRelatedLayerName(const std::string layerName)
Definition Layer.hpp:345
virtual void ReleaseConstantData()
Definition Layer.cpp:336
ShapeInferenceMethod GetShapeInferenceMethod() const
Definition Layer.hpp:255
const std::string & GetNameStr() const
Definition Layer.hpp:240
void OperateOnConstantTensors(Op op)
Definition Layer.hpp:319
std::vector< InputSlot >::iterator EndInputSlots()
Definition Layer.hpp:263
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Definition Layer.cpp:308
void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
Definition Layer.hpp:357
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
Definition Layer.hpp:351
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition Layer.cpp:526
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition Layer.cpp:571
void CollectQueueDescriptorOutputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
Definition Layer.hpp:393
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition Layer.cpp:260
const std::vector< InputSlot > & GetInputSlots() const
Definition Layer.hpp:258
OutputHandler & GetOutputHandler(unsigned int i=0)
Definition Layer.hpp:250
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
Definition Layer.cpp:432
void SetGuid(LayerGuid guid)
Definition Layer.hpp:342
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition Layer.hpp:339
virtual void ValidateTensorShapesFromInputs()=0
const std::list< std::string > & GetRelatedLayerNames()
Definition Layer.hpp:347
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition Layer.hpp:266
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition Layer.hpp:343
virtual ConstantTensors GetConstantTensorsByRef() override final
Definition Layer.cpp:576
LayerType * CloneBase(Graph &graph, Params &&... params) const
std::vector< OutputHandler > m_OutputHandlers
Definition Layer.hpp:440
InputSlot & GetInputSlot(unsigned int index) override
Get the input slot handle by slot index.
Definition Layer.hpp:338
OutputSlot & GetOutputSlot(unsigned int index=0) override
Get the output slot handle by slot index.
Definition Layer.hpp:340
void ResetPriority() const
Definition Layer.cpp:354
virtual ~Layer()=default
virtual Layer * Clone(Graph &graph) const =0
Creates a dynamically-allocated copy of this layer.
std::shared_ptr< T > GetAdditionalInformation() const
Definition Layer.hpp:368
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition Layer.hpp:245
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition Layer.hpp:334
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
Definition Layer.cpp:540
AdditionalInfoObjectPtr m_AdditionalInfoObject
Definition Layer.hpp:430
const char * GetName() const override
Returns the name of the layer.
Definition Layer.hpp:332
bool GetAllowExpandedDims() const
Definition Layer.hpp:256
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition Layer.hpp:286
virtual const BaseDescriptor & GetParameters() const override
If the layer has a descriptor return it.
Definition Layer.hpp:378
std::vector< InputSlot >::iterator BeginInputSlots()
Definition Layer.hpp:262
const BackendId & GetBackendId() const
Definition Layer.hpp:290
Optional< BackendId > GetBackendHint() const
Definition Layer.hpp:355
virtual void Reparent(Graph &dest, std::list< Layer * >::const_iterator iterator)=0
void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr &additionalInfo)
Definition Layer.hpp:373
DataType GetDataType() const
Definition Layer.cpp:345
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition Layer.cpp:457
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition Layer.cpp:303
LayerPriority GetPriority() const
Definition Layer.cpp:360
friend class Graph
Definition Layer.hpp:382
std::vector< OutputSlot >::iterator EndOutputSlots()
Definition Layer.hpp:267
void CollectQueueDescriptorInputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
Definition Layer.hpp:386
ShapeInferenceMethod m_ShapeInferenceMethod
Definition Layer.hpp:441
const InputSlot * GetConnection(unsigned int index) const override
Definition Layer.cpp:83
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition Layer.cpp:589
OutputSlot(OutputSlot &&)=default
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
Definition Layer.cpp:156
int Connect(IInputSlot &destination) override
Definition Layer.hpp:166
unsigned int GetNumConnections() const override
Definition Layer.hpp:158
unsigned int CalculateIndexOnOwner() const override
Definition Layer.cpp:172
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition Layer.cpp:223
OutputHandler & GetOutputHandler()
Definition Layer.hpp:140
OutputSlot(Layer &owner, OutputHandler &outputHandler)
Definition Layer.hpp:103
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Definition Layer.cpp:228
void DisconnectAll()
Definition Layer.cpp:147
LayerGuid GetOwningLayerGuid() const override
Definition Layer.cpp:208
const OutputHandler & GetOutputHandler() const
Definition Layer.hpp:139
OutputSlot & operator=(const OutputSlot &)=delete
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition Layer.cpp:95
Layer & GetOwningLayer() const
Definition Layer.hpp:132
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition Layer.hpp:146
bool operator==(const OutputSlot &other) const
Definition Layer.cpp:185
const std::vector< InputSlot * > & GetConnections() const
Definition Layer.hpp:145
OutputSlot(const OutputSlot &)=delete
bool IsTensorInfoSet() const override
Definition Layer.cpp:105
bool ValidateTensorShape(const TensorShape &shape) const
Definition Layer.cpp:114
void Disconnect(InputSlot &slot)
Definition Layer.cpp:131
OutputSlot & operator=(OutputSlot &&)=delete
void Disconnect(IInputSlot &slot) override
Definition Layer.hpp:171
const TensorInfo & GetTensorInfo() const override
Definition Layer.cpp:100
int Connect(InputSlot &destination)
Definition Layer.cpp:123
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition Layer.cpp:213
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition Layer.cpp:218
Copyright (c) 2021 ARM Limited and Contributors.
std::shared_ptr< void > AdditionalInfoObjectPtr
Definition Layer.hpp:228
unsigned int LayerPriority
Definition Layer.hpp:227
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition Types.hpp:311
DataLayout
Definition Types.hpp:63
DataType
Definition Types.hpp:49
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition Types.hpp:237
Base class for all descriptors.
Null Descriptor used as a return value from the IConnectableLayer GetParameters method by layers whic...
std::vector< ITensorHandle * > m_Inputs
std::vector< ITensorHandle * > m_Outputs
Contains information about TensorInfos of a layer.