ArmNN
 24.08
Layer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Layer.hpp"
6 
7 #include "Graph.hpp"
8 
11 
13 
15 
16 #include <client/include/IProfilingService.hpp>
17 
18 #include <fmt/format.h>
19 
20 #include <numeric>
21 
22 namespace armnn
23 {
24 
25 // Instantiate the static member variable
26 NullDescriptor Layer::m_NullDescriptor;
27 
29 {
30  switch (layer.GetType())
31  {
35  {
36  if (layer.GetNumInputSlots() != 2 && layer.GetNumInputSlots() != 3)
37  {
38  throw armnn::Exception("layer must have either 2 or 3 input slots.");
39  }
40  break;
41  }
42  default:
43  {
44  if (layer.GetNumInputSlots() != 1)
45  {
46  throw armnn::Exception("layer must have one input slot.");
47  }
48  break;
49  }
50  }
51 }
52 
54 {
55  if (layer.GetNumOutputSlots() != 1)
56  {
57  throw armnn::Exception("layer must have one output slot.");
58  }
59 
60  OutputSlot* const prevSlot = GetConnectedOutputSlot();
61 
62  if (prevSlot != nullptr)
63  {
64  // Disconnects parent from this.
65  prevSlot->Disconnect(*this);
66 
68 
69  // Connects inserted layer to parent.
70  int idx = prevSlot->Connect(layer.GetInputSlot(0));
71  prevSlot->SetEdgeStrategy(armnn::numeric_cast<unsigned int>(idx), EdgeStrategy::Undefined);
72 
73  // Sets tensor info for inserted layer.
74  const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
75  layer.GetOutputHandler().SetTensorInfo(tensorInfo);
76  }
77 
78  // Connects inserted layer to this.
79  layer.GetOutputSlot(0).Connect(*this);
81 }
82 
83 const InputSlot* OutputSlot::GetConnection(unsigned int index) const
84 {
85  ValidateConnectionIndex(index);
86  return m_Connections[index];
87 }
88 
90 {
91  ValidateConnectionIndex(index);
92  return m_Connections[index];
93 }
94 
// Stores the tensor info for this output on the slot's output handler.
void OutputSlot::SetTensorInfo(const TensorInfo& tensorInfo)
{
    GetOutputHandler().SetTensorInfo(tensorInfo);
}
99 
101 {
102  return GetOutputHandler().GetTensorInfo();
103 }
104 
106 {
107  if (GetOwningLayer().GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
108  {
110  }
112 }
113 
115 {
116  if (!IsTensorInfoSet())
117  {
118  throw armnn::Exception("TensorInfo must be set in order to validate the shape.");
119  }
120  return shape == m_OutputHandler.GetTensorInfo().GetShape();
121 }
122 
124 {
125  destination.SetConnection(this);
126  m_Connections.push_back(&destination);
127  m_EdgeStrategies.push_back(EdgeStrategy::Undefined);
128  return armnn::numeric_cast<int>(m_Connections.size() - 1);
129 }
130 
132 {
133  slot.SetConnection(nullptr);
134  auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
135 
136  if (it == m_Connections.end())
137  {
138  return;
139  }
140 
141  auto idx = std::distance(m_Connections.begin(), it);
142  m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
143 
144  m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
145 }
146 
148 {
149  while (GetNumConnections() > 0)
150  {
151  InputSlot& connection = *GetConnection(0);
152  Disconnect(connection);
153  }
154 }
155 
157 {
158  while (GetNumConnections() > 0)
159  {
160  if (m_EdgeStrategies[0] != EdgeStrategy::Undefined)
161  {
162  throw armnn::Exception("Cannot move connections once memory strategies have be established.");
163  }
164 
165  InputSlot& connection = *GetConnection(0);
166  Disconnect(connection);
167  destination.Connect(connection);
169  }
170 }
171 
173 {
174  for (unsigned int i = 0; i < GetOwningLayer().GetNumOutputSlots(); i++)
175  {
176  if (GetOwningLayer().GetOutputSlot(i) == (*this))
177  {
178  return i;
179  }
180  }
181  throw armnn::Exception("Did not find slot on owner.");
182  return 0; // Error
183 }
184 
185 bool OutputSlot::operator==(const OutputSlot& other) const
186 {
187  bool isSame = other.GetNumConnections() == GetNumConnections();
188  if (!isSame)
189  {
190  return false;
191  }
192 
193  for (unsigned int i = 0; i < GetNumConnections(); i++)
194  {
195  isSame &= other.GetConnection(i) == GetConnection(i);
196  }
197  return isSame;
198 }
199 
200 void OutputSlot::ValidateConnectionIndex(unsigned int index) const
201 {
202  if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
203  {
204  throw InvalidArgumentException((fmt::format("GetConnection: Invalid index {} provided", index)));
205  }
206 }
207 
209 {
210  return GetOwningLayer().GetGuid();
211 }
212 
214 {
215  m_TensorHandleFactoryId = id;
216 }
217 
219 {
220  return m_TensorHandleFactoryId;
221 }
222 
// Records the edge (memory) strategy for an existing connection. The index is
// not bounds-checked here; callers must pass a valid connection index.
void OutputSlot::SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
{
    m_EdgeStrategies[connectionIndex] = strategy;
}
227 
229 {
230  return m_EdgeStrategies[connectionIdx];
231 }
232 
233 Layer::Layer(unsigned int numInputSlots,
234  unsigned int numOutputSlots,
235  LayerType type,
236  DataLayout layout,
237  const char* name)
238 : m_OutputHandlers(numOutputSlots)
239 , m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
240 , m_LayerName(name ? name : "")
241 , m_Type(type)
242 , m_BackendId()
243 , m_BackendHint(EmptyOptional())
244 , m_Guid(arm::pipe::IProfilingService::GetNextGuid())
245 {
246  IgnoreUnused(layout);
247  m_InputSlots.reserve(numInputSlots);
248  for (unsigned int i = 0; i < numInputSlots; ++i)
249  {
250  m_InputSlots.emplace_back(*this, i);
251  }
252 
253  m_OutputSlots.reserve(numOutputSlots);
254  for (unsigned int i = 0; i < numOutputSlots; ++i)
255  {
256  m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
257  }
258 }
259 
// Convenience constructor: delegates to the main constructor with a default
// data layout of NCHW (the layout is not stored either way).
Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             const char* name)
: Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
{
}
267 
268 void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
269 {
270  for (auto&& inputSlot : GetInputSlots())
271  {
272  // The graph must be well-formed at this point.
273  if (!inputSlot.GetConnection())
274  {
275  throw armnn::Exception("input slot must have valid connection.");
276  }
277  const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
278 
279  if (inputSlot.IsTensorInfoOverridden() && outputHandler.GetData())
280  {
281  auto handler = outputHandler.GetData()->DecorateTensorHandle(inputSlot.GetTensorInfo());
282 
283  if (handler)
284  {
285  // Add overridden TensorHandle
286  dataCollector.Push(handler.get(), inputSlot.GetTensorInfo());
287  continue;
288  }
289  }
290  // Add default TensorHandle
291  dataCollector.Push(outputHandler.GetData(), inputSlot.GetTensorInfo());
292  }
293 }
294 
295 void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
296 {
297  for (auto&& outputHandler : m_OutputHandlers)
298  {
299  outputHandler.CollectWorkloadOutputs(dataCollector);
300  }
301 }
302 
304 {
306 }
307 
309  const IWorkloadFactory& workloadFactory,
310  const bool IsMemoryManaged)
311 {
312  for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
313  {
314 
315  OutputSlot& slot = GetOutputSlot(idx);
317 
318  OutputHandler& handler = GetOutputHandler(idx);
319  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
320  {
321  handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
322  }
323  else
324  {
325  ITensorHandleFactory* handleFactory;
326  handleFactory = registry.GetFactory(factoryId);
327  if (!handleFactory)
328  {
329  throw armnn::NullPointerException("handleFactory must not be null.");
330  }
331  handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
332  }
333  }
334 }
335 
337 {
338  // Now free up the static data.
339  OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
340  {
341  handle.reset();
342  });
343 }
344 
346 {
347  if (GetNumInputSlots() > 0) // Ignore the input layer.
348  {
349  return GetInputSlot(0).GetTensorInfo().GetDataType();
350  }
352 }
353 
355 {
356  m_Priority = 0;
357  m_Visiting = false;
358 }
359 
361 {
362  constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
363  constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
364 
365  if (GetType() == LayerType::Input)
366  {
367  m_Priority = inputPrio;
368  }
369  else if (GetType() == LayerType::Output)
370  {
371  m_Priority = outputPrio;
372  }
373  else if (m_Priority == 0)
374  {
375  if (m_Visiting)
376  {
377  throw GraphValidationException("Graph has circular dependencies: cannot walk");
378  }
379 
380  auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
381  {
382  const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
383  if (outputSlot)
384  {
385  const Layer& input = outputSlot->GetOwningLayer();
386  return std::max(prio, input.GetPriority());
387  }
388  else
389  {
390  // unconnected input slot
391  return prio;
392  }
393  };
394 
395  m_Visiting = true;
396  LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
397  m_Visiting = false;
398 
399  if (parentPrio >= outputPrio)
400  {
401  throw GraphValidationException("Graph has too many edges");
402  }
403 
404  m_Priority = parentPrio + 1U;
405  }
406 
407  return m_Priority;
408 }
409 
410 void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
411 {
412  if (GetNumInputSlots() != expectedConnections)
413  {
414  throw armnn::Exception("input slots must match expected connections.");
415  }
416 
417  for (unsigned int i=0; i<expectedConnections; ++i)
418  {
419  if (GetInputSlot(i).GetConnection() == nullptr)
420  {
422  fmt::format("Input connection #{0} must be connected "
423  "for {1} layer {2} {3}",
424  i,
426  GetNameStr(),
427  location.AsString()));
428  }
429  }
430 }
431 
432 std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
433 {
434  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(GetNumInputSlots() != 0, "input slots must not be zero.");
435  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(GetNumOutputSlots() != 0, "output slots must not be zero.");
436 
437  // By default we return what we got, meaning the output shape(s) are the same as the input(s).
438  // This only works if the number of inputs and outputs are the same. Since we are in the Layer
439  // base class, this means the implementation needs to be overridden in the specific layers for
440  // the other cases. So the missing implementation justifies the UnimplementedException.
441 
443  {
445  fmt::format("Default implementation for InferOutputShapes can only be used for "
446  "layers with the same number of input and output slots. This doesn't "
447  "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
449  GetNameStr(),
452  CHECK_LOCATION().AsString()));
453  }
454  return inputShapes;
455 }
456 
457 void Layer::ValidateAndCopyShape(const TensorShape& outputShape,
458  const TensorShape& inferredShape,
459  const ShapeInferenceMethod shapeInferenceMethod,
460  const std::string& layerName,
461  const unsigned int outputSlotIndex)
462 {
463  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
464  {
465  if (m_AllowExpandedDims)
466  {
467  std::vector<unsigned int> outputDims = armnnUtils::SqueezeDims(outputShape);
468  std::vector<unsigned int> inferredDims = armnnUtils::SqueezeDims(inferredShape);
469 
470  if (outputDims.size() != inferredDims.size())
471  {
472  std::stringstream ss;
473  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
474  "] does not match the inferred shape. ";
475  ss << outputShape << " != " << inferredShape;
476  throw LayerValidationException(ss.str());
477  }
478  for (unsigned int i = 0; i < outputDims.size(); ++i)
479  {
480  if (outputDims[i] != inferredDims[i])
481  {
482  std::stringstream ss;
483  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
484  "] does not match the inferred shape at dimension index [";
485  ss << i << "] " << outputShape << " != " << inferredShape;
486  throw LayerValidationException(ss.str());
487  }
488  }
489  return;
490  }
491  else
492  {
493  ConditionalThrowIfNotEqual<LayerValidationException>(
494  layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
495  outputShape,
496  inferredShape);
497  return;
498  }
499  }
500 
501  if (outputShape.GetDimensionality() == Dimensionality::Specified)
502  {
503  for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i)
504  {
505  if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i])
506  {
507  std::stringstream ss;
508  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
509  "] does not match the inferred shape at dimension index [";
510  ss << i << "] " << outputShape << " != " << inferredShape;
511  throw LayerValidationException(ss.str());
512  }
513  }
514  }
515 
516  TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo();
517 
518  armnn::TensorInfo inferredTensorInfo(inferredShape,
519  info.GetDataType(),
520  info.GetQuantizationScale(),
521  info.GetQuantizationOffset());
522 
523  GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo);
524 }
525 
526 void Layer::VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod)
527 {
528  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
529  {
530  ConditionalThrow<LayerValidationException>(
532  "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
533 
534  ConditionalThrow<LayerValidationException>(
535  outputShape.AreAllDimensionsSpecified(),
536  "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
537  }
538 }
539 
541 {
542  std::string guid = std::to_string(m_Guid);
543  std::string layerType = GetLayerTypeAsCString(m_Type);
544  std::string backendId = std::string(m_BackendId);
545  if (!(guid.compare("") == 0) && !guid.empty())
546  {
547  fn("Guid", guid);
548  }
549  if(!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
550  {
551  fn("LayerName",m_LayerName);
552  }
553  if(!(layerType.compare("") == 0) && !layerType.empty())
554  {
555  fn("LayerType",layerType);
556  }
557  if(!(backendId.compare("") == 0) && !backendId.empty())
558  {
559  fn("BackendID",backendId);
560  }
561  std::shared_ptr<ActivationDescriptor>
562  activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
563 
564  if (activationDescPtr)
565  {
566  StringifyLayerParameters<ActivationDescriptor>::Serialize(fn, *activationDescPtr.get());
567  }
568 }
569 
570 // default implementation of ExecuteStrategy
// default implementation of ExecuteStrategy
// Visits this layer with an empty descriptor and no constants; layers with
// parameters or weights override this to pass their own.
void Layer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
}
575 
577 {
578  const Layer *constThis = const_cast<const Layer*>(this);
579  ConstantTensors res;
580 
581  ImmutableConstantTensors immutableData = constThis->GetConstantTensorsByRef();
582  for (auto i : immutableData)
583  {
584  res.push_back(const_cast<std::shared_ptr<ConstTensorHandle>&>(i.get()));
585  }
586  return res;
587 }
588 
590 {
591  return m_OwningLayer;
592 }
593 
595 {
596  return m_OwningLayer;
597 }
598 
600 {
601  return m_OwningLayer;
602 }
603 
605 {
606  return m_OwningLayer;
607 }
608 
// Overrides the TensorInfo seen through this input slot, taking precedence
// over the connected output slot's info. The parameter is taken by value per
// the interface this method overrides.
void InputSlot::SetTensorInfo(const TensorInfo tensorInfo)
{
    m_OverriddenTensorInfo = Optional<TensorInfo>(tensorInfo);
}
613 
615 {
616  if (m_OverriddenTensorInfo.has_value())
617  {
618  return m_OverriddenTensorInfo.value();
619  }
620  else
621  {
622  return GetConnection()->GetTensorInfo();
623  }
624 }
625 
627 {
628  return m_OverriddenTensorInfo.has_value() || (GetConnection() && GetConnection()->IsTensorInfoSet());
629 }
630 
632 {
633  return m_OverriddenTensorInfo.has_value();
634 }
635 
636 } // namespace armnn
armnn::OutputSlot::SetTensorHandleFactory
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:213
armnn::InputSlot::SetTensorInfo
void SetTensorInfo(const TensorInfo tensorInfo) override
Sets the TensorInfo for this InputSlot.
Definition: Layer.cpp:609
arm
Definition: BackendRegistry.hpp:15
armnn::OutputSlot::operator==
bool operator==(const OutputSlot &other) const
Definition: Layer.cpp:185
armnn::Optional
Definition: Optional.hpp:270
armnn::GetLayerTypeAsCString
const char * GetLayerTypeAsCString(LayerType type)
Definition: InternalTypes.cpp:13
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:100
WorkloadData.hpp
armnnUtils::SqueezeDims
std::vector< unsigned int > SqueezeDims(const armnn::TensorShape &tensorShape)
Definition: TensorUtils.cpp:195
armnn::OutputSlot::ValidateTensorShape
bool ValidateTensorShape(const TensorShape &shape) const
Definition: Layer.cpp:114
armnn::OutputSlot::GetOutputHandler
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:139
armnn::Layer::OperateOnConstantTensors
void OperateOnConstantTensors(Op op)
Definition: Layer.hpp:319
armnn::Layer::m_AdditionalInfoObject
AdditionalInfoObjectPtr m_AdditionalInfoObject
Definition: Layer.hpp:427
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::OutputSlot
Definition: Layer.hpp:100
armnn::OutputSlot::SetTensorInfo
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:95
armnn::TensorShape::GetDimensionSpecificity
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
armnn::TensorHandleFactoryRegistry::GetFactory
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
Definition: TensorHandleFactoryRegistry.cpp:39
armnn::LayerPriority
unsigned int LayerPriority
Definition: Layer.hpp:227
armnn::TensorInfo
Definition: Tensor.hpp:152
Graph.hpp
armnn::OutputHandler::SetTensorInfo
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
Definition: OutputHandler.cpp:15
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::Layer::ValidateAndCopyShape
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:457
armnn::OutputSlot::DisconnectAll
void DisconnectAll()
Definition: Layer.cpp:147
armnn::InputSlot::GetOwningIConnectableLayer
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition: Layer.cpp:599
armnn::IConnectableLayer::ConstantTensors
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >> ConstantTensors
Definition: INetwork.hpp:136
armnn::TensorShape::AreAllDimensionsSpecified
bool AreAllDimensionsSpecified() const
Checks if there is at least one dimension not specified.
Definition: Tensor.cpp:241
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
armnn::Layer::m_OutputHandlers
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:440
armnn::OutputHandler::CreateTensorHandles
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
Definition: OutputHandler.cpp:21
armnn::Layer::GetConstantTensorsByRef
virtual ConstantTensors GetConstantTensorsByRef() override final
Definition: Layer.cpp:576
armnn::OutputSlot::GetOwningIConnectableLayer
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition: Layer.cpp:589
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
armnn::Layer::GetInputSlots
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:258
armnn::OutputSlot::Connect
int Connect(InputSlot &destination)
Definition: Layer.cpp:123
armnn::IStrategy
Definition: IStrategy.hpp:16
armnn::CheckLocation::AsString
std::string AsString() const
Definition: Exceptions.hpp:29
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::OutputSlot::IsTensorInfoSet
bool IsTensorInfoSet() const override
Definition: Layer.cpp:105
armnn::OutputSlot::GetEdgeStrategyForConnection
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Definition: Layer.cpp:228
armnn::Layer::Layer
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:260
NumericCast.hpp
armnn::Layer::CreateTensorHandles
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Definition: Layer.cpp:308
armnn::OutputSlot::GetOwningLayerGuid
LayerGuid GetOwningLayerGuid() const override
Definition: Layer.cpp:208
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
TensorUtils.hpp
armnn::Layer
Definition: Layer.hpp:230
armnn::IConnectableLayer::ImmutableConstantTensors
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > >> ImmutableConstantTensors
Definition: INetwork.hpp:141
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:614
armnn::Layer::SerializeLayerParameters
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
Definition: Layer.cpp:540
armnn::OutputSlot::CalculateIndexOnOwner
unsigned int CalculateIndexOnOwner() const override
Definition: Layer.cpp:172
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::OutputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:132
armnn::CheckLocation
Definition: Exceptions.hpp:14
armnn::EdgeStrategy::Undefined
@ Undefined
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::OutputSlot::Disconnect
void Disconnect(InputSlot &slot)
Definition: Layer.cpp:131
armnn::OutputHandler::IsTensorInfoSet
bool IsTensorInfoSet() const
Returns true if SetTensorInfo() has been called at least once on this.
Definition: OutputHandler.hpp:58
armnn::WorkloadDataCollector
Definition: WorkloadDataCollector.hpp:15
armnn::OutputSlot::GetNumConnections
unsigned int GetNumConnections() const override
Definition: Layer.hpp:158
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::InputSlot::IsTensorInfoSet
bool IsTensorInfoSet() const override
Returns true if this InputSlot either has an overridden TensorInfo for this InputSlot that was set th...
Definition: Layer.cpp:626
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::QueueDescriptor
Definition: WorkloadData.hpp:24
armnn::EdgeStrategy
EdgeStrategy
Definition: ITensorHandleFactory.hpp:104
armnn::Layer::GetGuid
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:343
armnn::Layer::GetOutputHandler
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:245
armnn::Layer::GetNumOutputSlots
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:335
armnn::ParameterStringifyFunction
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Definition: SerializeLayerParameters.hpp:14
armnn::Layer::VerifyShapeInferenceType
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:526
armnn::ITensorHandleFactory
Definition: ITensorHandleFactory.hpp:46
armnn::QueueDescriptor::m_AdditionalInfoObject
void * m_AdditionalInfoObject
Definition: WorkloadData.hpp:28
armnn::Layer::SetAdditionalInfo
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:303
armnn::StringifyLayerParameters::Serialize
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)
Definition: SerializeLayerParameters.hpp:25
armnn::InputSlot::GetConnection
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:219
armnn::Dimensionality::NotSpecified
@ NotSpecified
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::InputSlot::IsTensorInfoOverridden
bool IsTensorInfoOverridden() const override
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
Definition: Layer.cpp:631
armnn::OutputHandler
Definition: OutputHandler.hpp:28
armnn::BoostLogSeverityMapping::info
@ info
armnn::WorkloadDataCollector::Push
void Push(ITensorHandle *handle, const TensorInfo &info)
Definition: WorkloadDataCollector.hpp:24
armnn::Layer::ReleaseConstantData
virtual void ReleaseConstantData()
Definition: Layer.cpp:336
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::OutputSlot::MoveAllConnections
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
Definition: Layer.cpp:156
armnn::Layer::GetNameStr
const std::string & GetNameStr() const
Definition: Layer.hpp:240
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::Layer::GetNumInputSlots
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:334
armnn::InputSlot
Definition: Layer.hpp:42
armnn::ShapeInferenceMethod::ValidateOnly
@ ValidateOnly
Validate all output shapes.
armnn::ShapeInferenceMethod::InferAndValidate
@ InferAndValidate
Infer missing output shapes and validate all output shapes.
armnn::InputSlot::Insert
void Insert(Layer &layer)
Definition: Layer.cpp:53
armnn::AssertNumberOfInputSlots
void AssertNumberOfInputSlots(Layer &layer)
Definition: Layer.cpp:28
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::Layer::GetDataType
DataType GetDataType() const
Definition: Layer.cpp:345
armnn::Layer::GetType
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:286
armnn::Dimensionality::Specified
@ Specified
TensorHandle.hpp
armnn::InputSlot::SetConnection
void SetConnection(OutputSlot *source)
Links the slot to an output slot or breaks an existing link if passing nullptr.
Definition: Layer.hpp:63
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::Layer::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: Layer.cpp:571
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::OutputSlot::SetEdgeStrategy
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:223
armnn::OutputSlot::GetTensorHandleFactoryId
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:218
armnn::InputSlot::GetConnectedOutputSlot
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::Layer::ValidateTensorShapesFromInputs
virtual void ValidateTensorShapesFromInputs()=0
armnn::GraphValidationException
Definition: Exceptions.hpp:110
Layer.hpp
armnn::Layer::InferOutputShapes
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
Definition: Layer.cpp:432
armnn::Layer::GetPriority
LayerPriority GetPriority() const
Definition: Layer.cpp:360
armnn::ITensorHandleFactory::FactoryId
std::string FactoryId
Definition: ITensorHandleFactory.hpp:49
armnn::Layer::VerifyLayerConnections
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:410
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::LayerType::Input
@ Input
armnn::OutputHandler::GetTensorInfo
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.
Definition: OutputHandler.hpp:42
armnn::UnimplementedException
Definition: Exceptions.hpp:98
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::ShapeInferenceMethod
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition: Types.hpp:235
armnn::NullPointerException
Definition: Exceptions.hpp:146
armnn::TensorShape::GetDimensionality
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::OutputSlot::GetConnection
const InputSlot * GetConnection(unsigned int index) const override
Definition: Layer.cpp:83
armnn::IStrategy::ExecuteStrategy
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
armnn::LayerType::Output
@ Output
armnn::Layer::ResetPriority
void ResetPriority() const
Definition: Layer.cpp:354
armnn::DataLayout::NCHW
@ NCHW
ARMNN_THROW_INVALIDARG_MSG_IF_FALSE
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
Definition: Exceptions.hpp:210