24.08
|
Go to the documentation of this file.
16 #include <client/include/IProfilingService.hpp>
18 #include <fmt/format.h>
26 NullDescriptor Layer::m_NullDescriptor;
62 if (prevSlot !=
nullptr)
85 ValidateConnectionIndex(index);
86 return m_Connections[index];
91 ValidateConnectionIndex(index);
92 return m_Connections[index];
118 throw armnn::Exception(
"TensorInfo must be set in order to validate the shape.");
126 m_Connections.push_back(&destination);
128 return armnn::numeric_cast<int>(m_Connections.size() - 1);
134 auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
136 if (it == m_Connections.end())
141 auto idx = std::distance(m_Connections.begin(), it);
142 m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
144 m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
162 throw armnn::Exception(
"Cannot move connections once memory strategies have be established.");
167 destination.
Connect(connection);
200 void OutputSlot::ValidateConnectionIndex(
unsigned int index)
const
202 if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
215 m_TensorHandleFactoryId = id;
220 return m_TensorHandleFactoryId;
225 m_EdgeStrategies[connectionIndex] = strategy;
230 return m_EdgeStrategies[connectionIdx];
234 unsigned int numOutputSlots,
238 : m_OutputHandlers(numOutputSlots)
240 , m_LayerName(name ? name :
"")
244 , m_Guid(
arm::pipe::IProfilingService::GetNextGuid())
247 m_InputSlots.reserve(numInputSlots);
248 for (
unsigned int i = 0; i < numInputSlots; ++i)
250 m_InputSlots.emplace_back(*
this, i);
253 m_OutputSlots.reserve(numOutputSlots);
254 for (
unsigned int i = 0; i < numOutputSlots; ++i)
261 unsigned int numOutputSlots,
273 if (!inputSlot.GetConnection())
277 const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
279 if (inputSlot.IsTensorInfoOverridden() && outputHandler.GetData())
281 auto handler = outputHandler.GetData()->DecorateTensorHandle(inputSlot.GetTensorInfo());
286 dataCollector.
Push(handler.get(), inputSlot.GetTensorInfo());
291 dataCollector.
Push(outputHandler.GetData(), inputSlot.GetTensorInfo());
295 void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector)
const
299 outputHandler.CollectWorkloadOutputs(dataCollector);
310 const bool IsMemoryManaged)
326 handleFactory = registry.
GetFactory(factoryId);
362 constexpr
LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
363 constexpr
LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
367 m_Priority = inputPrio;
371 m_Priority = outputPrio;
373 else if (m_Priority == 0)
382 const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
399 if (parentPrio >= outputPrio)
404 m_Priority = parentPrio + 1U;
417 for (
unsigned int i=0; i<expectedConnections; ++i)
422 fmt::format(
"Input connection #{0} must be connected "
423 "for {1} layer {2} {3}",
445 fmt::format(
"Default implementation for InferOutputShapes can only be used for "
446 "layers with the same number of input and output slots. This doesn't "
447 "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
460 const std::string& layerName,
461 const unsigned int outputSlotIndex)
465 if (m_AllowExpandedDims)
470 if (outputDims.size() != inferredDims.size())
472 std::stringstream ss;
473 ss << layerName <<
": TensorShape set on OutputSlot[" << outputSlotIndex <<
474 "] does not match the inferred shape. ";
475 ss << outputShape <<
" != " << inferredShape;
478 for (
unsigned int i = 0; i < outputDims.size(); ++i)
480 if (outputDims[i] != inferredDims[i])
482 std::stringstream ss;
483 ss << layerName <<
": TensorShape set on OutputSlot[" << outputSlotIndex <<
484 "] does not match the inferred shape at dimension index [";
485 ss << i <<
"] " << outputShape <<
" != " << inferredShape;
493 ConditionalThrowIfNotEqual<LayerValidationException>(
494 layerName +
": TensorShape set on OutputSlot[0] does not match the inferred shape.",
507 std::stringstream ss;
508 ss << layerName <<
": TensorShape set on OutputSlot[" << outputSlotIndex <<
509 "] does not match the inferred shape at dimension index [";
510 ss << i <<
"] " << outputShape <<
" != " << inferredShape;
520 info.GetQuantizationScale(),
521 info.GetQuantizationOffset());
530 ConditionalThrow<LayerValidationException>(
532 "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
534 ConditionalThrow<LayerValidationException>(
536 "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
542 std::string guid = std::to_string(m_Guid);
544 std::string backendId = std::string(m_BackendId);
545 if (!(guid.compare(
"") == 0) && !guid.empty())
549 if(!(m_LayerName.compare(
"") == 0) && !m_LayerName.empty())
551 fn(
"LayerName",m_LayerName);
553 if(!(layerType.compare(
"") == 0) && !layerType.empty())
555 fn(
"LayerType",layerType);
557 if(!(backendId.compare(
"") == 0) && !backendId.empty())
559 fn(
"BackendID",backendId);
561 std::shared_ptr<ActivationDescriptor>
562 activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
564 if (activationDescPtr)
578 const Layer *constThis =
const_cast<const Layer*
>(
this);
582 for (
auto i : immutableData)
584 res.push_back(
const_cast<std::shared_ptr<ConstTensorHandle>&
>(i.get()));
591 return m_OwningLayer;
596 return m_OwningLayer;
601 return m_OwningLayer;
606 return m_OwningLayer;
616 if (m_OverriddenTensorInfo.has_value())
618 return m_OverriddenTensorInfo.value();
633 return m_OverriddenTensorInfo.has_value();
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
bool operator==(const OutputSlot &other) const
const char * GetLayerTypeAsCString(LayerType type)
const TensorInfo & GetTensorInfo() const override
std::vector< unsigned int > SqueezeDims(const armnn::TensorShape &tensorShape)
bool ValidateTensorShape(const TensorShape &shape) const
const OutputHandler & GetOutputHandler() const
void OperateOnConstantTensors(Op op)
AdditionalInfoObjectPtr m_AdditionalInfoObject
void SetTensorInfo(const TensorInfo &tensorInfo) override
bool GetDimensionSpecificity(unsigned int i) const
Gets information about whether the dimension size has been specified or not.
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id. Returns nullptr if not found.
unsigned int LayerPriority
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >> ConstantTensors
bool AreAllDimensionsSpecified() const
Checks if there is at least one dimension not specified.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
std::vector< OutputHandler > m_OutputHandlers
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
virtual ConstantTensors GetConstantTensorsByRef() override final
const IConnectableLayer & GetOwningIConnectableLayer() const override
virtual const TensorInfo & GetTensorInfo() const =0
const std::vector< InputSlot > & GetInputSlots() const
int Connect(InputSlot &destination)
std::string AsString() const
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
bool IsTensorInfoSet() const override
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
LayerGuid GetOwningLayerGuid() const override
const char * GetName() const override
Returns the name of the layer.
static const FactoryId LegacyFactoryId
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > >> ImmutableConstantTensors
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
unsigned int CalculateIndexOnOwner() const override
Layer & GetOwningLayer() const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
void Disconnect(InputSlot &slot)
bool IsTensorInfoSet() const
Returns true if SetTensorInfo() has been called at least once on this.
unsigned int GetNumConnections() const override
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
LayerGuid GetGuid() const final
Returns the unique id of the layer.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
void * m_AdditionalInfoObject
void SetAdditionalInfo(QueueDescriptor &descriptor) const
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)
Base class for all ArmNN exceptions so that users can filter to just those.
Base class for all descriptors.
void Push(ITensorHandle *handle, const TensorInfo &info)
virtual void ReleaseConstantData()
DataType GetDataType() const
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
const std::string & GetNameStr() const
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
@ ValidateOnly
Validate all output shapes.
@ InferAndValidate
Infer missing output shapes and validate all output shapes.
void AssertNumberOfInputSlots(Layer &layer)
DataType GetDataType() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
virtual bool IsTensorInfoSet() const =0
const TensorShape & GetShape() const
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
void IgnoreUnused(Ts &&...)
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Copyright (c) 2021 ARM Limited and Contributors.
virtual void ValidateTensorShapesFromInputs()=0
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
LayerPriority GetPriority() const
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Dimensionality GetDimensionality() const
Function that returns the tensor type.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
const InputSlot * GetConnection(unsigned int index) const override
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
void ResetPriority() const
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)