ArmNN
 25.11
Loading...
Searching...
No Matches
Layer.cpp
Go to the documentation of this file.
1//
2// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5#include "Layer.hpp"
6
7#include "Graph.hpp"
8
11
13
15
16#include <client/include/IProfilingService.hpp>
17
18#include <fmt/format.h>
19
20#include <numeric>
21
22namespace armnn
23{
24
25// Instantiate the static member variable
26NullDescriptor Layer::m_NullDescriptor;
27
// AssertNumberOfInputSlots(Layer& layer): sanity-checks that a layer has the
// number of input slots its type requires, throwing armnn::Exception otherwise.
// NOTE(review): the signature line (28) and the case labels (32-34) were lost
// in this extraction — confirm which LayerTypes take the 2-or-3-slot path
// (presumably layers with an optional third input such as a bias).
29{
30 switch (layer.GetType())
31 {
// Case labels missing here (original lines 32-34): these layer types accept
// either 2 or 3 inputs.
35 {
36 if (layer.GetNumInputSlots() != 2 && layer.GetNumInputSlots() != 3)
37 {
38 throw armnn::Exception("layer must have either 2 or 3 input slots.");
39 }
40 break;
41 }
// Every other layer type must have exactly one input slot.
42 default:
43 {
44 if (layer.GetNumInputSlots() != 1)
45 {
46 throw armnn::Exception("layer must have one input slot.");
47 }
48 break;
49 }
50 }
51}
52
// InputSlot::Insert(Layer& layer): splices a single-output layer between this
// input slot and its current producer (if any). After the call, the previous
// producer (when present) feeds the inserted layer, and the inserted layer's
// output slot 0 feeds this input slot.
// NOTE(review): the signature line (53) and lines 67/71/80 are missing from
// this extraction; line 71 presumably consumes `idx` (otherwise unused below),
// e.g. to set an edge strategy on the new connection — confirm upstream.
54{
// The layer being spliced in must expose exactly one output.
55 if (layer.GetNumOutputSlots() != 1)
56 {
57 throw armnn::Exception("layer must have one output slot.");
58 }
59
60 OutputSlot* const prevSlot = GetConnectedOutputSlot();
61
62 if (prevSlot != nullptr)
63 {
64 // Disconnects parent from this.
65 prevSlot->Disconnect(*this);
66
68
69 // Connects inserted layer to parent.
70 int idx = prevSlot->Connect(layer.GetInputSlot(0));
72
// The inserted layer forwards the same tensor as the previous producer,
// so it inherits the producer's TensorInfo.
73 // Sets tensor info for inserted layer.
74 const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
75 layer.GetOutputHandler().SetTensorInfo(tensorInfo);
76 }
77
78 // Connects inserted layer to this.
79 layer.GetOutputSlot(0).Connect(*this);
81}
82
83const InputSlot* OutputSlot::GetConnection(unsigned int index) const
84{
85 ValidateConnectionIndex(index);
86 return m_Connections[index];
87}
88
90{
91 ValidateConnectionIndex(index);
92 return m_Connections[index];
93}
94
96{
97 GetOutputHandler().SetTensorInfo(tensorInfo);
98}
99
104
106{
107 if (GetOwningLayer().GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
108 {
110 }
112}
113
// OutputSlot::ValidateTensorShape(const TensorShape& shape): returns true iff
// the given shape equals the shape of the TensorInfo already set on this slot.
// Throws if no TensorInfo has been set yet. (Signature line 114 was lost in
// this extraction.)
115{
116 if (!IsTensorInfoSet())
117 {
118 throw armnn::Exception("TensorInfo must be set in order to validate the shape.");
119 }
120 return shape == m_OutputHandler.GetTensorInfo().GetShape();
121}
122
// OutputSlot::Connect(InputSlot& destination): wires this output to the given
// input slot and returns the zero-based index of the new connection.
// Every new connection starts with EdgeStrategy::Undefined; the back-pointer
// on the destination slot is set first. (Signature line 123 was lost in this
// extraction.)
124{
125 destination.SetConnection(this);
126 m_Connections.push_back(&destination);
// Edge strategy is decided later (see SetEdgeStrategy); kept in a parallel
// vector indexed like m_Connections.
127 m_EdgeStrategies.push_back(EdgeStrategy::Undefined);
128 return armnn::numeric_cast<int>(m_Connections.size() - 1);
129}
130
// OutputSlot::Disconnect(InputSlot& slot): breaks the link to the given input
// slot, removing it from m_Connections and erasing the matching entry of the
// parallel m_EdgeStrategies vector. (Signature line 131 was lost in this
// extraction.)
// NOTE(review): the slot's back-pointer is cleared *before* checking that the
// slot is actually connected to this output — a slot connected elsewhere would
// still have its connection nulled. Confirm this is intentional.
132{
133 slot.SetConnection(nullptr);
134 auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
135
// Not one of ours: nothing further to remove.
136 if (it == m_Connections.end())
137 {
138 return;
139 }
140
// Remember the position so the edge-strategy entry at the same index can be
// erased after the connection is removed.
141 auto idx = std::distance(m_Connections.begin(), it);
142 m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
143
144 m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
145}
146
// OutputSlot::DisconnectAll(): severs every connection from this output slot.
// Repeatedly disconnects connection 0 until none remain, since Disconnect
// mutates m_Connections. (Signature line 147 was lost in this extraction.)
148{
149 while (GetNumConnections() > 0)
150 {
151 InputSlot& connection = *GetConnection(0);
152 Disconnect(connection);
153 }
154}
155
// OutputSlot::MoveAllConnections(OutputSlot& destination): transfers every
// connection from this slot to `destination`, one at a time. Only legal while
// all edge strategies are still Undefined — once memory strategies are fixed,
// rewiring would invalidate them. (Signature line 156 was lost in this
// extraction. NOTE(review): the runtime message contains a typo, "have be
// established"; left untouched here since it is program output.)
157{
158 while (GetNumConnections() > 0)
159 {
160 if (m_EdgeStrategies[0] != EdgeStrategy::Undefined)
161 {
162 throw armnn::Exception("Cannot move connections once memory strategies have be established.");
163 }
164
// Always operate on connection 0: Disconnect compacts the vector.
165 InputSlot& connection = *GetConnection(0);
166 Disconnect(connection);
167 destination.Connect(connection);
169 }
170}
171
// OutputSlot::CalculateIndexOnOwner(): linear-searches the owning layer's
// output slots for *this and returns its index; throws if not found.
// (Signature line 172 was lost in this extraction.)
173{
174 for (unsigned int i = 0; i < GetOwningLayer().GetNumOutputSlots(); i++)
175 {
176 if (GetOwningLayer().GetOutputSlot(i) == (*this))
177 {
178 return i;
179 }
180 }
181 throw armnn::Exception("Did not find slot on owner.");
// Unreachable after the throw above; kept to satisfy all-paths-return.
182 return 0; // Error
183}
184
185bool OutputSlot::operator==(const OutputSlot& other) const
186{
187 bool isSame = other.GetNumConnections() == GetNumConnections();
188 if (!isSame)
189 {
190 return false;
191 }
192
193 for (unsigned int i = 0; i < GetNumConnections(); i++)
194 {
195 isSame &= other.GetConnection(i) == GetConnection(i);
196 }
197 return isSame;
198}
199
200void OutputSlot::ValidateConnectionIndex(unsigned int index) const
201{
202 if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
203 {
204 throw InvalidArgumentException((fmt::format("GetConnection: Invalid index {} provided", index)));
205 }
206}
207
212
214{
215 m_TensorHandleFactoryId = id;
216}
217
219{
220 return m_TensorHandleFactoryId;
221}
222
// Records the memory/edge strategy for the connection at connectionIndex.
// NOTE(review): unlike GetConnection, this does not bounds-check the index;
// callers must pass a valid connection index.
223void OutputSlot::SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
224{
225 m_EdgeStrategies[connectionIndex] = strategy;
226}
227
229{
230 return m_EdgeStrategies[connectionIdx];
231}
232
// Main Layer constructor: sizes the output-handler list, stores name/type,
// fetches a profiling GUID, then builds the input and output slot vectors.
// The `layout` parameter is deliberately unused here (see IgnoreUnused below).
// NOTE(review): member-initializer line 239 is missing from this extraction
// (possibly m_ShapeInferenceMethod — confirm against the original source).
233Layer::Layer(unsigned int numInputSlots,
234 unsigned int numOutputSlots,
235 LayerType type,
236 DataLayout layout,
237 const char* name)
238: m_OutputHandlers(numOutputSlots)
240, m_LayerName(name ? name : "")
241, m_Type(type)
242, m_BackendId()
243, m_BackendHint(EmptyOptional())
244, m_Guid(arm::pipe::IProfilingService::GetNextGuid())
245{
246 IgnoreUnused(layout);
// Each InputSlot records its owner and its index on the owner.
247 m_InputSlots.reserve(numInputSlots);
248 for (unsigned int i = 0; i < numInputSlots; ++i)
249 {
250 m_InputSlots.emplace_back(*this, i);
251 }
252
// Each OutputSlot is bound to the OutputHandler at the same index.
253 m_OutputSlots.reserve(numOutputSlots);
254 for (unsigned int i = 0; i < numOutputSlots; ++i)
255 {
256 m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
257 }
258}
259
// Convenience constructor: delegates to the main constructor with
// DataLayout::NCHW as the default layout.
260Layer::Layer(unsigned int numInputSlots,
261 unsigned int numOutputSlots,
262 LayerType type,
263 const char* name)
264: Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
265{
266}
267
268void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
269{
270 for (auto&& inputSlot : GetInputSlots())
271 {
272 // The graph must be well-formed at this point.
273 if (!inputSlot.GetConnection())
274 {
275 throw armnn::Exception("input slot must have valid connection.");
276 }
277 const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
278
279 if (inputSlot.IsTensorInfoOverridden() && outputHandler.GetData())
280 {
281 auto handler = outputHandler.GetData()->DecorateTensorHandle(inputSlot.GetTensorInfo());
282
283 if (handler)
284 {
285 // Add overridden TensorHandle
286 dataCollector.Push(handler.get(), inputSlot.GetTensorInfo());
287 continue;
288 }
289 }
290 // Add default TensorHandle
291 dataCollector.Push(outputHandler.GetData(), inputSlot.GetTensorInfo());
292 }
293}
294
295void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
296{
297 for (auto&& outputHandler : m_OutputHandlers)
298 {
299 outputHandler.CollectWorkloadOutputs(dataCollector);
300 }
301}
302
304{
306}
307
// Layer::CreateTensorHandles: creates the output tensor handles for every
// output slot, either via the supplied workload factory (legacy path) or via
// the ITensorHandleFactory registered for the slot's factory id.
// NOTE(review): the signature line (308), the factoryId declaration (316) and
// the branch condition (319) are missing from this extraction; `factoryId` is
// presumably the slot's tensor-handle-factory id compared against
// ITensorHandleFactory::LegacyFactoryId — confirm against the original source.
309 const IWorkloadFactory& workloadFactory,
310 const bool IsMemoryManaged)
311{
312 for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
313 {
314
315 OutputSlot& slot = GetOutputSlot(idx);
317
318 OutputHandler& handler = GetOutputHandler(idx);
// Legacy path: delegate handle creation to the workload factory.
320 {
321 handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
322 }
323 else
324 {
// Modern path: resolve the registered factory for this slot and use it.
325 ITensorHandleFactory* handleFactory;
326 handleFactory = registry.GetFactory(factoryId);
327 if (!handleFactory)
328 {
329 throw armnn::NullPointerException("handleFactory must not be null.");
330 }
331 handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
332 }
333 }
334}
335
337{
338 // Now free up the static data.
339 OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
340 {
341 handle.reset();
342 });
343}
344
346{
347 if (GetNumInputSlots() > 0) // Ignore the input layer.
348 {
350 }
352}
353
355{
356 m_Priority = 0;
357 m_Visiting = false;
358}
359
// Layer::GetPriority(): lazily computes a topological priority for this layer.
// Input layers get the lowest priority, Output layers the highest; any other
// layer gets 1 + max(parent priorities), computed recursively. m_Visiting acts
// as a cycle guard during the recursive walk. (Signature line 360 was lost in
// this extraction.)
361{
362 constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
363 constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
364
365 if (GetType() == LayerType::Input)
366 {
367 m_Priority = inputPrio;
368 }
369 else if (GetType() == LayerType::Output)
370 {
371 m_Priority = outputPrio;
372 }
// 0 means "not yet computed" (see ResetPriority); recompute on demand.
373 else if (m_Priority == 0)
374 {
375 if (m_Visiting)
376 {
377 throw GraphValidationException("Graph has circular dependencies: cannot walk")
378 }
379
// Folds one input slot into the running maximum of parent priorities;
// unconnected slots contribute nothing.
380 auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
381 {
382 const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
383 if (outputSlot)
384 {
385 const Layer& input = outputSlot->GetOwningLayer();
386 return std::max(prio, input.GetPriority());
387 }
388 else
389 {
390 // unconnected input slot
391 return prio;
392 }
393 };
394
395 m_Visiting = true;
396 LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
397 m_Visiting = false;
398
// parentPrio + 1 must stay below the Output sentinel priority.
399 if (parentPrio >= outputPrio)
400 {
401 throw GraphValidationException("Graph has too many edges");
402 }
403
404 m_Priority = parentPrio + 1U;
405 }
406
407 return m_Priority;
408}
409
// Verifies the layer has exactly `expectedConnections` input slots and that
// every one of them is connected; throws with a formatted diagnostic
// (including the caller's CheckLocation) otherwise.
// NOTE(review): lines 421 and 425 are missing from this extraction — they
// presumably open the throw expression and pass the layer-type string as
// format argument {1}; confirm against the original source.
410void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
411{
412 if (GetNumInputSlots() != expectedConnections)
413 {
414 throw armnn::Exception("input slots must match expected connections.");
415 }
416
417 for (unsigned int i=0; i<expectedConnections; ++i)
418 {
419 if (GetInputSlot(i).GetConnection() == nullptr)
420 {
422 fmt::format("Input connection #{0} must be connected "
423 "for {1} layer {2} {3}",
424 i,
426 GetNameStr(),
427 location.AsString()));
428 }
429 }
430}
431
// Default shape inference: echoes the input shapes back as the output shapes.
// Only valid when a layer has the same number of inputs and outputs; layers
// for which that does not hold must override this.
// NOTE(review): lines 442/444/448/450-451 are missing from this extraction —
// presumably the inputs!=outputs condition, the throw expression opening, and
// the layer-type/slot-count format arguments; confirm against the original.
432std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
433{
434 ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(GetNumInputSlots() != 0, "input slots must not be zero.");
435 ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(GetNumOutputSlots() != 0, "output slots must not be zero.");
436
437 // By default we return what we got, meaning the output shape(s) are the same as the input(s).
438 // This only works if the number of inputs and outputs are the same. Since we are in the Layer
439 // base class, this means the implementation needs to be overridden in the specific layers for
440 // the other cases. So the missing implementation justifies the UnimplementedException.
441
443 {
445 fmt::format("Default implementation for InferOutputShapes can only be used for "
446 "layers with the same number of input and output slots. This doesn't "
447 "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
449 GetNameStr(),
452 CHECK_LOCATION().AsString()));
453 }
454 return inputShapes;
455}
456
// Layer::ValidateAndCopyShape: in ValidateOnly mode, checks the shape already
// set on the output slot against the inferred shape (optionally comparing
// squeezed dims when expanded dims are allowed) and throws on mismatch; in
// infer mode, checks only the explicitly-specified dimensions and then writes
// a TensorInfo carrying the inferred shape (preserving dtype and quantization)
// back onto the output slot.
// NOTE(review): the signature line (457, first parameter `outputShape`) and
// line 493 are missing from this extraction; 493 presumably invokes
// ConditionalThrowIfNotEqual<LayerValidationException> — confirm upstream.
458 const TensorShape& inferredShape,
459 const ShapeInferenceMethod shapeInferenceMethod,
460 const std::string& layerName,
461 const unsigned int outputSlotIndex)
462{
463 if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
464 {
// With expanded dims allowed, compare shapes after removing size-1 dims.
465 if (m_AllowExpandedDims)
466 {
467 std::vector<unsigned int> outputDims = armnnUtils::SqueezeDims(outputShape);
468 std::vector<unsigned int> inferredDims = armnnUtils::SqueezeDims(inferredShape);
469
470 if (outputDims.size() != inferredDims.size())
471 {
472 std::stringstream ss;
473 ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
474 "] does not match the inferred shape. ";
475 ss << outputShape << " != " << inferredShape;
476 throw LayerValidationException(ss.str());
477 }
478 for (unsigned int i = 0; i < outputDims.size(); ++i)
479 {
480 if (outputDims[i] != inferredDims[i])
481 {
482 std::stringstream ss;
483 ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
484 "] does not match the inferred shape at dimension index [";
485 ss << i << "] " << outputShape << " != " << inferredShape;
486 throw LayerValidationException(ss.str());
487 }
488 }
489 return;
490 }
491 else
492 {
// Strict comparison of the full shapes (throw helper call on line 493 lost).
494 layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
495 outputShape,
496 inferredShape);
497 return;
498 }
499 }
500
// Infer mode: only dimensions the user explicitly specified must agree with
// the inferred ones.
501 if (outputShape.GetDimensionality() == Dimensionality::Specified)
502 {
503 for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i)
504 {
505 if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i])
506 {
507 std::stringstream ss;
508 ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
509 "] does not match the inferred shape at dimension index [";
510 ss << i << "] " << outputShape << " != " << inferredShape;
511 throw LayerValidationException(ss.str());
512 }
513 }
514 }
515
// Copy the inferred shape onto the slot, keeping the existing data type and
// quantization parameters.
516 TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo();
517
518 armnn::TensorInfo inferredTensorInfo(inferredShape,
519 info.GetDataType(),
520 info.GetQuantizationScale(),
521 info.GetQuantizationOffset());
522
523 GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo);
524}
525
526void Layer::VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod)
527{
528 if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
529 {
532 "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
533
535 outputShape.AreAllDimensionsSpecified(),
536 "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
537 }
538}
539
// Layer::SerializeLayerParameters(ParameterStringifyFunction& fn): emits the
// layer's identifying attributes (Guid, LayerName, LayerType, BackendID) as
// name/value string pairs via the supplied callback, then serializes the
// fused-activation descriptor when one is attached.
// NOTE(review): signature line (540) and lines 562/566 are missing from this
// extraction — 562 presumably fetches GetAdditionalInformation<ActivationDescriptor>()
// into activationDescPtr, 566 serializes it; confirm upstream. Also note each
// guard below tests `compare("") == 0` AND `empty()`, which are equivalent —
// the double check is redundant but harmless.
541{
542 std::string guid = std::to_string(m_Guid);
543 std::string layerType = GetLayerTypeAsCString(m_Type);
544 std::string backendId = std::string(m_BackendId);
545 if (!(guid.compare("") == 0) && !guid.empty())
546 {
547 fn("Guid", guid);
548 }
549 if(!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
550 {
551 fn("LayerName",m_LayerName);
552 }
553 if(!(layerType.compare("") == 0) && !layerType.empty())
554 {
555 fn("LayerType",layerType);
556 }
557 if(!(backendId.compare("") == 0) && !backendId.empty())
558 {
559 fn("BackendID",backendId);
560 }
561 std::shared_ptr<ActivationDescriptor>
563
564 if (activationDescPtr)
565 {
567 }
568}
569
570// default implementation of ExecuteStrategy
572{
573 strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
574}
575
// Layer::GetConstantTensorsByRef() (mutable overload): builds a mutable view
// of the layer's constant tensors by calling the const overload (via a
// const-qualified `this`) and casting the constness off each reference.
// (Signature line 576 was lost in this extraction.)
// NOTE(review): the const_cast is only safe because the underlying handles
// are owned non-const by this layer; worth confirming that invariant holds.
577{
// Route to the const overload returning ImmutableConstantTensors.
578 const Layer *constThis = const_cast<const Layer*>(this);
579 ConstantTensors res;
580
581 ImmutableConstantTensors immutableData = constThis->GetConstantTensorsByRef();
582 for (auto i : immutableData)
583 {
584 res.push_back(const_cast<std::shared_ptr<ConstTensorHandle>&>(i.get()));
585 }
586 return res;
587}
588
590{
591 return m_OwningLayer;
592}
593
595{
596 return m_OwningLayer;
597}
598
600{
601 return m_OwningLayer;
602}
603
605{
606 return m_OwningLayer;
607}
608
610{
611 m_OverriddenTensorInfo = Optional<TensorInfo>(tensorInfo);
612}
613
615{
616 if (m_OverriddenTensorInfo.has_value())
617 {
618 return m_OverriddenTensorInfo.value();
619 }
620 else
621 {
622 return GetConnection()->GetTensorInfo();
623 }
624}
625
627{
628 return m_OverriddenTensorInfo.has_value() || (GetConnection() && GetConnection()->IsTensorInfoSet());
629}
630
632{
633 return m_OverriddenTensorInfo.has_value();
634}
635
636} // namespace armnn
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
#define CHECK_LOCATION()
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition Types.hpp:26
Base class for all ArmNN exceptions so that users can filter to just those.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition INetwork.hpp:81
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > > > ConstantTensors
Definition INetwork.hpp:136
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > > > ImmutableConstantTensors
Definition INetwork.hpp:141
virtual bool IsTensorInfoSet() const =0
virtual const TensorInfo & GetTensorInfo() const =0
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
static const FactoryId LegacyFactoryId
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition Layer.cpp:599
bool IsTensorInfoOverridden() const override
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
Definition Layer.cpp:631
const IOutputSlot * GetConnection() const override
Definition Layer.hpp:219
void Insert(Layer &layer)
Definition Layer.cpp:53
void SetTensorInfo(const TensorInfo tensorInfo) override
Sets the TensorInfo for this InputSlot.
Definition Layer.cpp:609
const OutputSlot * GetConnectedOutputSlot() const
Definition Layer.hpp:56
bool IsTensorInfoSet() const override
Returns true if this InputSlot either has an overridden TensorInfo for this InputSlot that was set th...
Definition Layer.cpp:626
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition Layer.cpp:614
void SetConnection(OutputSlot *source)
Links the slot to an output slot or breaks an existing link if passing nullptr.
Definition Layer.hpp:63
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition Layer.cpp:410
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition Layer.hpp:335
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition Layer.hpp:337
virtual void ReleaseConstantData()
Definition Layer.cpp:336
const std::string & GetNameStr() const
Definition Layer.hpp:240
void OperateOnConstantTensors(Op op)
Definition Layer.hpp:319
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Definition Layer.cpp:308
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition Layer.cpp:526
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition Layer.cpp:571
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition Layer.cpp:260
const std::vector< InputSlot > & GetInputSlots() const
Definition Layer.hpp:258
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
Definition Layer.cpp:432
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition Layer.hpp:339
virtual void ValidateTensorShapesFromInputs()=0
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition Layer.hpp:343
virtual ConstantTensors GetConstantTensorsByRef() override final
Definition Layer.cpp:576
std::vector< OutputHandler > m_OutputHandlers
Definition Layer.hpp:440
void ResetPriority() const
Definition Layer.cpp:354
std::shared_ptr< T > GetAdditionalInformation() const
Definition Layer.hpp:368
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition Layer.hpp:245
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition Layer.hpp:334
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
Definition Layer.cpp:540
AdditionalInfoObjectPtr m_AdditionalInfoObject
Definition Layer.hpp:430
const char * GetName() const override
Returns the name of the layer.
Definition Layer.hpp:332
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition Layer.hpp:286
DataType GetDataType() const
Definition Layer.cpp:345
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition Layer.cpp:457
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition Layer.cpp:303
LayerPriority GetPriority() const
Definition Layer.cpp:360
ShapeInferenceMethod m_ShapeInferenceMethod
Definition Layer.hpp:441
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
bool IsTensorInfoSet() const
Returns true if SetTensorInfo() has been called at least once on this.
const InputSlot * GetConnection(unsigned int index) const override
Definition Layer.cpp:83
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition Layer.cpp:589
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
Definition Layer.cpp:156
unsigned int GetNumConnections() const override
Definition Layer.hpp:158
unsigned int CalculateIndexOnOwner() const override
Definition Layer.cpp:172
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition Layer.cpp:223
OutputSlot(Layer &owner, OutputHandler &outputHandler)
Definition Layer.hpp:103
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Definition Layer.cpp:228
void DisconnectAll()
Definition Layer.cpp:147
LayerGuid GetOwningLayerGuid() const override
Definition Layer.cpp:208
const OutputHandler & GetOutputHandler() const
Definition Layer.hpp:139
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition Layer.cpp:95
Layer & GetOwningLayer() const
Definition Layer.hpp:132
bool operator==(const OutputSlot &other) const
Definition Layer.cpp:185
bool IsTensorInfoSet() const override
Definition Layer.cpp:105
bool ValidateTensorShape(const TensorShape &shape) const
Definition Layer.cpp:114
void Disconnect(InputSlot &slot)
Definition Layer.cpp:131
const TensorInfo & GetTensorInfo() const override
Definition Layer.cpp:100
int Connect(InputSlot &destination)
Definition Layer.cpp:123
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition Layer.cpp:213
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition Layer.cpp:218
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
DataType GetDataType() const
Definition Tensor.hpp:200
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition Tensor.cpp:174
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition Tensor.cpp:211
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition Tensor.hpp:92
bool AreAllDimensionsSpecified() const
Returns true only when every dimension is specified (i.e. no dimension is left unspecified).
Definition Tensor.cpp:241
void Push(ITensorHandle *handle, const TensorInfo &info)
Copyright (c) 2021 ARM Limited and Contributors.
unsigned int LayerPriority
Definition Layer.hpp:227
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
void AssertNumberOfInputSlots(Layer &layer)
Definition Layer.cpp:28
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
DataLayout
Definition Types.hpp:63
DataType
Definition Types.hpp:49
const char * GetLayerTypeAsCString(LayerType type)
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition Types.hpp:237
@ InferAndValidate
Infer missing output shapes and validate all output shapes.
Definition Types.hpp:241
@ ValidateOnly
Validate all output shapes.
Definition Types.hpp:239
void ConditionalThrowIfNotEqual(const std::string &message, const ComparedType &leftHandSide, const ComparedType &rightHandSide)
ComparedType must support: operator==(const ComparedType&) operator<<(ostream&, const ComparedType&)
void ConditionalThrow(bool condition, const std::string &message)
void IgnoreUnused(Ts &&...)
std::vector< unsigned int > SqueezeDims(const armnn::TensorShape &tensorShape)
Base class for all descriptors.
std::string AsString() const
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition Optional.hpp:32
Null Descriptor used as a return value from the IConnectableLayer GetParameters method by layers whic...
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)