if (inputShapes.size() != 1)
    throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
                           "\" - should be \"1\".");
// ...
outputShape[hIndex] = inputShape[hIndex] / m_Param.m_BlockSize;
outputShape[wIndex] = inputShape[wIndex] / m_Param.m_BlockSize;

outputShape[cIndex] = inputShape[cIndex] * m_Param.m_BlockSize * m_Param.m_BlockSize;

return std::vector<TensorShape>({ outputShape });
// ...
if (inferredShapes.size() != 1)
    // ... (exception construction elided in this excerpt)
                          + std::to_string(inferredShapes.size()) +
                          " elements - should only have 1.");
Base class for all ArmNN exceptions so that users can filter to just those.
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
LayerType * CloneBase(Graph &graph, Params &&... params) const
const char * GetName() const override
Returns the name of the layer.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void SetAdditionalInfo(QueueDescriptor &descriptor) const
ShapeInferenceMethod m_ShapeInferenceMethod
LayerWithParameters(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const SpaceToDepthDescriptor &param, const char *name)
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
const SpaceToDepthDescriptor & GetParameters() const override
SpaceToDepthDescriptor m_Param
const TensorInfo & GetTensorInfo() const override
SpaceToDepthLayer(const SpaceToDepthDescriptor param, const char *name)
Constructor to create a SpaceToDepthLayer.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
SpaceToDepthLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs is equal to the number of outputs,...
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of SpaceToDepthLayer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the SpaceToDepth type.
const TensorShape & GetShape() const
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
unsigned int GetWidthIndex() const
unsigned int GetHeightIndex() const
unsigned int GetChannelsIndex() const
Copyright (c) 2021 ARM Limited and Contributors.
LayerType
When adding a new layer, also adapt the LastLayer enum value in the enum class LayerType below.
void SpaceToDepth(const TensorInfo &inputInfo, const TensorInfo &outputInfo, const SpaceToDepthDescriptor &params, Decoder< float > &inputData, Encoder< float > &outputData)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
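As a usage illustration of the descriptor fields above, here is a hedged sketch of adding a SpaceToDepth layer through the public INetwork API. The layer names, binding ids, and tensor shapes are illustrative only and not taken from the ArmNN sources.

// Sketch only: builds a tiny network containing one SpaceToDepth layer.
#include <armnn/ArmNN.hpp>

void AddSpaceToDepthExample()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Block size 2 on an NHWC tensor: H and W halve, C grows by a factor of 4.
    SpaceToDepthDescriptor desc;
    desc.m_BlockSize  = 2;                  // must be >= 1
    desc.m_DataLayout = DataLayout::NHWC;

    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    IConnectableLayer* s2d    = network->AddSpaceToDepthLayer(desc, "spaceToDepth");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(s2d->GetInputSlot(0));
    s2d->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // 1x4x4x3 in, 1x2x2x12 out, matching the shape inference shown earlier.
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 4, 4, 3 }), DataType::Float32));
    s2d->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 2, 2, 12 }), DataType::Float32));
}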