// Convolution2dLayer::SerializeLayerParameters: report the filter dimensions
// and the output channel count through the supplied stringify callback.
const std::vector<TensorShape>& inputShapes =
{
    GetInputSlot(0).GetTensorInfo().GetShape(),
    GetInputSlot(1).GetTensorInfo().GetShape()
};
const TensorShape filterShape = inputShapes[1];
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
unsigned int filterWidth  = filterShape[dataLayoutIndex.GetWidthIndex()];
unsigned int filterHeight = filterShape[dataLayoutIndex.GetHeightIndex()];
unsigned int outChannels  = filterShape[0];

fn("OutputChannels", std::to_string(outChannels));
fn("FilterWidth", std::to_string(filterWidth));
fn("FilterHeight", std::to_string(filterHeight));
// Convolution2dLayer::Clone: create a copy of this layer that reuses its descriptor and name.
auto layer = CloneBase<Convolution2dLayer>(graph, m_Param, GetName());
return std::move(layer);
// Convolution2dLayer::InferOutputShapes expects exactly two input shapes:
// the input tensor shape and the filter (weights) shape.
if (inputShapes.size() != 2)
{
    throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
                           "\" - should be \"2\".");
}

const TensorShape& inputShape  = inputShapes[0];
const TensorShape  filterShape = inputShapes[1];
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);

unsigned int inWidth     = inputShape[dataLayoutIndex.GetWidthIndex()];
unsigned int inHeight    = inputShape[dataLayoutIndex.GetHeightIndex()];
unsigned int inBatchSize = inputShape[0];

// The effective filter extent once dilation is applied is k + (d - 1) * (k - 1).
unsigned int filterWidth         = filterShape[dataLayoutIndex.GetWidthIndex()];
unsigned int dilatedFilterWidth  = filterWidth + (m_Param.m_DilationX - 1) * (filterWidth - 1);
unsigned int readWidth           = (inWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - dilatedFilterWidth;
unsigned int outWidth            = 1 + (readWidth / m_Param.m_StrideX);

unsigned int filterHeight        = filterShape[dataLayoutIndex.GetHeightIndex()];
unsigned int dilatedFilterHeight = filterHeight + (m_Param.m_DilationY - 1) * (filterHeight - 1);
unsigned int readHeight          = (inHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - dilatedFilterHeight;
unsigned int outHeight           = 1 + (readHeight / m_Param.m_StrideY);

unsigned int outChannels  = filterShape[0];
unsigned int outBatchSize = inBatchSize;

// Order the output dimensions according to the configured data layout.
TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
                          TensorShape({ outBatchSize, outHeight, outWidth, outChannels }) :
                          TensorShape({ outBatchSize, outChannels, outHeight, outWidth });

return std::vector<TensorShape>({ tensorShape });
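To make the shape arithmetic concrete, here is a standalone sketch with illustrative values (not taken from the source): a 3x3 filter with dilation 2 acts like a 5x5 filter, so a 32-wide input padded by 2 on each side and strided by 2 produces an output width of 16.

#include <iostream>

int main()
{
    // Illustrative values only.
    unsigned int inWidth     = 32;
    unsigned int filterWidth = 3;
    unsigned int dilationX   = 2;
    unsigned int padLeft     = 2;
    unsigned int padRight    = 2;
    unsigned int strideX     = 2;

    // Same arithmetic as the listing above.
    unsigned int dilatedFilterWidth = filterWidth + (dilationX - 1) * (filterWidth - 1);  // 5
    unsigned int readWidth          = (inWidth + padLeft + padRight) - dilatedFilterWidth; // 31
    unsigned int outWidth           = 1 + (readWidth / strideX); // integer division: 1 + 15 = 16

    std::cout << "outWidth = " << outWidth << "\n";
    return 0;
}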
// Convolution2dLayer::ValidateTensorShapesFromInputs: InferOutputShapes must
// produce exactly one shape for this layer.
if (inferredShapes.size() != 1)
{
    throw armnn::LayerValidationException("inferredShapes has "
                                          + std::to_string(inferredShapes.size()) +
                                          " elements - should only have 1.");
}
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
This layer represents a convolution 2d operation.
ImmutableConstantTensors GetConstantTensorsByRef() const override
Retrieve the handles to the constant values connected to the layer.
void SerializeLayerParameters(ParameterStringifyFunction &fn) const override
Helper to serialize the layer parameters to string.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs is equal to the number of outputs,...
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of Convolution2dLayer.
Convolution2dLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
Convolution2dLayer(const Convolution2dDescriptor &param, const char *name)
Constructor to create a Convolution2dLayer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Convolution2d type.
Base class for all ArmNN exceptions so that users can filter to just those.
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > >> ImmutableConstantTensors
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
const char * GetName() const override
Returns the name of the layer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void SetAdditionalInfo(QueueDescriptor &descriptor) const
ShapeInferenceMethod m_ShapeInferenceMethod
void SerializeLayerParameters(ParameterStringifyFunction &fn) const override
Helper to serialize the layer parameters to string (currently used in DotSerializer and company).
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
Convolution2dDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
Layer::ImmutableConstantTensors GetConnectedConstantAsInputTensors() const
const Convolution2dDescriptor & GetParameters() const override
const TensorInfo & GetTensorInfo() const override
const TensorShape & GetShape() const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
unsigned int GetWidthIndex() const
unsigned int GetHeightIndex() const
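A small sketch of how DataLayoutIndexed resolves those indexes, assuming the armnnUtils/DataLayoutIndexed.hpp header and the armnnUtils namespace used elsewhere in Arm NN; the exact include path may differ between releases:

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnn/Types.hpp>
#include <iostream>

int main()
{
    // NHWC tensors store dimensions as [batch, height, width, channels];
    // NCHW tensors store them as [batch, channels, height, width].
    armnnUtils::DataLayoutIndexed nhwc(armnn::DataLayout::NHWC);
    armnnUtils::DataLayoutIndexed nchw(armnn::DataLayout::NCHW);

    std::cout << "NHWC height/width indexes: "
              << nhwc.GetHeightIndex() << ", " << nhwc.GetWidthIndex() << "\n"; // 1, 2
    std::cout << "NCHW height/width indexes: "
              << nchw.GetHeightIndex() << ", " << nchw.GetWidthIndex() << "\n"; // 2, 3
    return 0;
}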
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
uint32_t GetNumInputs(bool biasEnabled)
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationY
Dilation along y axis.
uint32_t m_PadTop
Padding top value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t GetNumInputs() const
uint32_t m_DilationX
Dilation along x axis.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
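Tying the descriptor fields above back to the shape inference earlier in this listing, here is a minimal sketch of how a Convolution2dDescriptor might be filled in; it assumes the public armnn/Descriptors.hpp header, and the values are illustrative only:

#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>

int main()
{
    // Symmetric padding of 2, stride 2 and dilation 2 in both dimensions,
    // operating on NHWC data. Weights and bias are supplied separately as
    // constant inputs, not through the descriptor.
    armnn::Convolution2dDescriptor descriptor;
    descriptor.m_PadLeft    = 2;
    descriptor.m_PadRight   = 2;
    descriptor.m_PadTop     = 2;
    descriptor.m_PadBottom  = 2;
    descriptor.m_StrideX    = 2;
    descriptor.m_StrideY    = 2;
    descriptor.m_DilationX  = 2;
    descriptor.m_DilationY  = 2;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    return 0;
}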