class IWorkloadFactory;
// InputSlot: constructor member initializers
: m_OwningLayer(owner)
, m_Connection(nullptr)
, m_SlotIndex(slotIndex)

// InputSlot: an input slot accepts at most one incoming connection
if (m_Connection != nullptr && source != nullptr)
{
    throw InvalidArgumentException("Tried to connect an output slot to an input slot, "
                                   "but the latter already has a connection");
}
m_Connection = source;

const unsigned int m_SlotIndex;
// OutputSlot: constructor member initializers
: m_OwningLayer(owner)
, m_OutputHandler(outputHandler)

// OutputSlot destructor: disconnecting may throw, so failures are reported rather than propagated
catch (const std::exception& e)
{
    std::cerr << "WARNING: An error has occurred when disconnecting all output slots: "
              << e.what() << std::endl;
}

unsigned int GetNumConnections() const override { return armnn::numeric_cast<unsigned int>(m_Connections.size()); }

// The IOutputSlot overrides forward to the strongly typed InputSlot overloads
return Connect(*PolymorphicDowncast<InputSlot*>(&destination));
return Disconnect(*PolymorphicDowncast<InputSlot*>(&slot));

void ValidateConnectionIndex(unsigned int index) const;

Layer& m_OwningLayer;
std::vector<InputSlot*> m_Connections;
std::vector<EdgeStrategy> m_EdgeStrategies;
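The Connect/Disconnect pair above maintains both ends of an edge: the OutputSlot stores the InputSlot pointer in m_Connections, while the InputSlot keeps a single back-pointer to its source. A minimal usage sketch, assuming 'producer' and 'consumer' are armnn::Layer references already owned by a Graph (they are not constructed here):

armnn::OutputSlot& out = producer.GetOutputSlot(0);
armnn::InputSlot&  in  = consumer.GetInputSlot(0);

out.Connect(in);                                       // records 'in' in m_Connections and sets its back-pointer
unsigned int fanOut = out.GetNumConnections();         // 1 after the call above
const armnn::InputSlot* first = out.GetConnection(0);  // == &in

out.Disconnect(in);                                    // removes the edge again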
// InputSlot destructor: detach from the source slot, reporting rather than propagating any exception
if (m_Connection != nullptr)
{
    try
    {
        m_Connection->Disconnect(*this);
    }
    catch (const std::exception& e)
    {
        std::cerr << "WARNING: An error has occurred when disconnecting an input slot: "
                  << e.what() << std::endl;
    }
}
class ScopedTensorHandle;
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char* name);
const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }

std::vector<InputSlot>::iterator EndInputSlots() { return m_InputSlots.end(); }
std::vector<OutputSlot>::iterator EndOutputSlots() { return m_OutputSlots.end(); }
// Layer::IsOutputUnconnected counts the connections made from every output slot
unsigned int numConnections = 0;
for (auto&& output : GetOutputSlots()) { numConnections += output.GetNumConnections(); }
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
                                 const IWorkloadFactory& factory,
                                 const bool IsMemoryManaged = true);
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
template <typename Op>
void OperateOnConstantTensors(Op op);   // applies 'op' to each handle returned by GetConstantTensorsByRef()
const char* GetName() const override { return m_LayerName.c_str(); }

unsigned int GetNumInputSlots() const override { return static_cast<unsigned int>(m_InputSlots.size()); }
unsigned int GetNumOutputSlots() const override { return static_cast<unsigned int>(m_OutputSlots.size()); }
virtual void Reparent(Graph& dest, std::list<Layer*>::const_iterator iterator) = 0;
void BackendSelectionHint(Optional<BackendId> backend) final { m_BackendHint = backend; }
void SetAllowExpandedDims(bool allowExpandedDims) { m_AllowExpandedDims = allowExpandedDims; }
virtual ~Layer() = default;
template <typename QueueDescriptor>
void CollectQueueDescriptorInputs(QueueDescriptor& descriptor, WorkloadInfo& info) const
{
    WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
    CollectWorkloadInputs(dataCollector);
}
template <typename QueueDescriptor>
void CollectQueueDescriptorOutputs(QueueDescriptor& descriptor, WorkloadInfo& info) const
{
    WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
    CollectWorkloadOutputs(dataCollector);
}
void ValidateAndCopyShape(const TensorShape& outputShape, const TensorShape& inferredShape,
                          const ShapeInferenceMethod shapeInferenceMethod,
                          const std::string& layerName, const unsigned int outputSlotIndex = 0);
template <typename QueueDescriptor>
WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const;   // builds the WorkloadInfo that accompanies 'descriptor'
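PrepInfoAndDesc and SetAdditionalInfo are the helpers the concrete layer classes use in their CreateWorkload overrides. A sketch of the usual pattern for a hypothetical MyLayer with a hypothetical MyQueueDescriptor (neither exists in this header, the LayerType value is likewise illustrative, and the unified IWorkloadFactory::CreateWorkload overload taking a LayerType is assumed to be available):

std::unique_ptr<IWorkload> MyLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    MyQueueDescriptor descriptor;     // hypothetical queue descriptor for this layer type
    SetAdditionalInfo(descriptor);    // forwards m_AdditionalInfoObject to the descriptor

    // PrepInfoAndDesc fills a WorkloadInfo with the TensorInfos of the connected input and output slots.
    return factory.CreateWorkload(LayerType::MyLayer, descriptor, PrepInfoAndDesc(descriptor));
}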
template <typename LayerType, typename ... Params>
LayerType* CloneBase(Graph& graph, Params&& ... params) const;
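CloneBase is the usual way to implement the pure-virtual Clone: it allocates a copy of the concrete layer inside the destination graph. A sketch for the same hypothetical MyLayer (real layers forward whatever constructor arguments they need in addition to the name):

MyLayer* MyLayer::Clone(Graph& graph) const
{
    return CloneBase<MyLayer>(graph, GetName());
}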
const std::string m_LayerName;

std::vector<InputSlot> m_InputSlots;
std::vector<OutputSlot> m_OutputSlots;

mutable bool m_Visiting = false;
bool m_AllowExpandedDims = false;

std::list<std::string> m_RelatedLayerNames;
BindableLayer(unsigned int numInputSlots, unsigned int numOutputSlots,
              LayerType type, const char* name, LayerBindingId id)
    : Layer(numInputSlots, numOutputSlots, type, name)
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Null Descriptor used as a return value from the IConnectableLayer GetParameters method by layers which do not have a descriptor.
bool operator==(const OutputSlot &other) const
const TensorInfo & GetTensorInfo() const override
BindableLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name, LayerBindingId id)
bool ValidateTensorShape(const TensorShape &shape) const
void SetGuid(LayerGuid guid)
const OutputHandler & GetOutputHandler() const
void OperateOnConstantTensors(Op op)
Optional< BackendId > GetBackendHint() const
void SetAllowExpandedDims(bool allowExpandedDims)
AdditionalInfoObjectPtr m_AdditionalInfoObject
void SetTensorInfo(const TensorInfo &tensorInfo) override
unsigned int LayerPriority
int Connect(IInputSlot &destination) override
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >> ConstantTensors
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
std::vector< OutputHandler > m_OutputHandlers
virtual ConstantTensors GetConstantTensorsByRef() override final
const IConnectableLayer & GetOwningIConnectableLayer() const override
const std::vector< InputSlot > & GetInputSlots() const
int Connect(InputSlot &destination)
void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
bool IsTensorInfoSet() const override
OutputSlot & GetOutputSlot(unsigned int index=0) override
Get the output slot handle by slot index.
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
LayerGuid GetOwningLayerGuid() const override
const char * GetName() const override
Returns the name of the layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const =0
bool IsOutputUnconnected()
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > >> ImmutableConstantTensors
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
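A brief usage sketch, assuming 'layer' is an armnn::Layer reference and that the ParameterStringifyFunction alias (listed further below) lives in the armnn namespace; the callback simply prints each parameter:

#include <iostream>
#include <string>

void DumpLayerParameters(const armnn::Layer& layer)
{
    armnn::ParameterStringifyFunction printFn =
        [](const std::string& name, const std::string& value)
        {
            std::cout << name << " = " << value << "\n";   // one "name = value" line per parameter
        };
    layer.SerializeLayerParameters(printFn);
}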
unsigned int CalculateIndexOnOwner() const override
An output connection slot for a layer.
std::shared_ptr< T > GetAdditionalInformation() const
Layer & GetOwningLayer() const
LayerType * CloneBase(Graph &graph, Params &&... params) const
void Disconnect(InputSlot &slot)
OutputSlot & operator=(const OutputSlot &)=delete
unsigned int GetNumConnections() const override
WorkloadInfo: Contains information about TensorInfos of a layer.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
std::vector< InputSlot >::iterator EndInputSlots()
LayerGuid GetGuid() const final
Returns the unique id of the layer.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
OutputHandler & GetOutputHandler()
void CollectQueueDescriptorInputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr &additionalInfo)
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
std::shared_ptr< void > AdditionalInfoObjectPtr
void SetAdditionalInfo(QueueDescriptor &descriptor) const
const std::vector< OutputSlot > & GetOutputSlots() const
BaseDescriptor: Base class for all descriptors.
LayerBindingId GetBindingId() const
OutputHandler & GetOutputHandler(unsigned int i=0)
std::vector< InputSlot >::iterator BeginInputSlots()
virtual void ReleaseConstantData()
std::vector< ITensorHandle * > m_Outputs
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
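For example, when one layer is substituted for another during graph optimisation, the existing downstream edges can be transferred in one call (a sketch; oldLayer and newLayer are hypothetical Layer references with compatible slots):

oldLayer.GetOutputSlot(0).MoveAllConnections(newLayer.GetOutputSlot(0));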
const std::string & GetNameStr() const
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
ShapeInferenceMethod GetShapeInferenceMethod() const
virtual const BaseDescriptor & GetParameters() const override
If the layer has a descriptor return it.
const std::list< std::string > & GetRelatedLayerNames()
DataType GetDataType() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
virtual Layer * Clone(Graph &graph) const =0
Creates a dynamically-allocated copy of this layer.
std::vector< OutputSlot >::iterator BeginOutputSlots()
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
void AddRelatedLayerName(const std::string layerName)
const std::vector< InputSlot * > & GetConnections() const
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
void Disconnect(IInputSlot &slot) override
const BackendId & GetBackendId() const
virtual void Reparent(Graph &dest, std::list< Layer * >::const_iterator iterator)=0
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Copyright (c) 2021 ARM Limited and Contributors.
virtual void ValidateTensorShapesFromInputs()=0
void SetBackendId(const BackendId &id) override
Set the backend of the IConnectableLayer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
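InferOutputShapes, VerifyShapeInferenceType and ValidateAndCopyShape are typically combined in a derived layer's ValidateTensorShapesFromInputs override. A minimal sketch for a hypothetical single-input, single-output MyLayer (CHECK_LOCATION() comes from armnn's Exceptions.hpp; the exact accessors used to read the connected TensorInfo are an assumption, not part of this excerpt):

void MyLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    auto inferredShapes = InferOutputShapes(
        { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MyLayer");
}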
void CollectQueueDescriptorOutputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
LayerPriority GetPriority() const
OutputSlot(Layer &owner, OutputHandler &outputHandler)
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
IConnectableLayer: Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
InputSlot & GetInputSlot(unsigned int index) override
Get the input slot handle by slot index.
bool GetAllowExpandedDims() const
ShapeInferenceMethod
The ShapeInferenceMethod determines how the output shapes are treated.
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
const InputSlot * GetConnection(unsigned int index) const override
std::vector< OutputSlot >::iterator EndOutputSlots()
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
std::vector< ITensorHandle * > m_Inputs
void ResetPriority() const