24.08
|
Go to the documentation of this file.
38 template<
typename FactoryType>
40 const FactoryType& factory,
45 bool useSubTensors = factory.SupportsSubTensors();
56 std::vector<std::unique_ptr<ITensorHandle>> subTensors;
62 std::set<unsigned int>::iterator axisIt = axis.begin();
65 ((*axisIt == numberOfDimensions - 1) ||
66 (*axisIt == numberOfDimensions - 2));
79 bool canUseSubTensorOnXorY =
true;
80 bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
81 if (isTensorHandleFactory)
83 for (
unsigned int it = 0; it < numOutputSlots; ++it)
87 std::vector<Capability> capabilities =
93 canUseSubTensorOnXorY =
false;
94 if (capabilities.empty())
96 canUseSubTensorOnXorY =
true;
100 if (!canUseSubTensorOnXorY)
107 auto CreateSubTensor = [&]()
118 canUseSubTensorOnXorY)
121 return factory.CreateSubTensorHandle(*inputData,
126 return std::unique_ptr<ITensorHandle>();
129 auto subTensor = CreateSubTensor();
132 useSubTensors =
false;
135 subTensors.push_back(std::move(subTensor));
141 for (
auto& subTensor : subTensors)
160 const bool isMemoryManaged)
167 CreateTensors(registry, workloadFactory, isMemoryManaged);
176 CreateTensors(registry, *handleFactory, isMemoryManaged);
189 throw armnn::Exception(
"inputShapes' and m_NumViews' sizes do not match (\""
190 + std::to_string(inputShapes.size()) +
195 std::vector<TensorShape> outShapes;
212 std::vector<TensorShape> views;
224 + std::to_string(inferredShapes.size()) +
232 inferredShapes[viewIdx],
A ViewsDescriptor for the SplitterLayer.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
This layer represents a split operation.
const TensorInfo & GetTensorInfo() const override
void Splitter(const SplitterQueueDescriptor &data, std::vector< ITensorHandle * > inputs, std::vector< ITensorHandle * > outputs)
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id. Returns nullptr if not found.
SplitterLayer(const ViewsDescriptor &param, const char *name)
Constructor to create a SplitterLayer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of SplitterLayer.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
std::vector< OutputHandler > m_OutputHandlers
const std::vector< InputSlot > & GetInputSlots() const
std::vector< ViewOrigin > m_ViewOrigins
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
const ViewsDescriptor & GetParameters() const override
ITensorHandle * GetData() const
Gets the allocated tensor memory.
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported; otherwise creates tensor handles.
const char * GetName() const override
Returns the name of the layer.
static const FactoryId LegacyFactoryId
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
SplitterLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
ViewsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same.
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Base class for all ArmNN exceptions so that users can filter to just those.
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs is equal to the number of outputs.
std::vector< OutputSlot >::iterator BeginOutputSlots()
const TensorShape & GetShape() const
#define ARMNN_NO_DEPRECATE_WARN_END
uint32_t GetNumDimensions() const
Get the number of dimensions.
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Copyright (c) 2021 ARM Limited and Contributors.
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
Calculates the axis values for split operation.
uint32_t GetNumViews() const
Get the number of views.
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
const InputSlot * GetConnection(unsigned int index) const override
std::vector< OutputSlot >::iterator EndOutputSlots()
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
const uint32_t * GetViewOrigin(uint32_t idx) const
Get the view origin at the int value idx.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Splitter type.